From 153e2d347d4bb5d1d448c707023e476ea161f1fc Mon Sep 17 00:00:00 2001 From: Nicolas Chevobbe Date: Thu, 20 Jul 2017 14:56:46 +0200 Subject: [PATCH 001/152] Bug 1382690 - Adapt extensions inspect() calls to the new frontend. r=bgrins The inspect command in extension was directly calling jsterm.inspectObject and doing so bypassed the code we implemented for the command on the new frontend (i.e. no more variable view). This patch modifies the jsterm inspectObject function so it can do the expected behavior, and also reverts some changes made in the original inspect command bug, so we only deal with the new frontend code in jsterm.inspectObject. This implied changing an extension test that was waiting for the variable view to boot-up. The test was modified to match the tests we already have for the inspect command on the new console frontend. MozReview-Commit-ID: 8fZV20Mck8r --HG-- extra : rebase_source : 59870c4bae3664aebf448acee439b918190591dc --- ..._devtools_inspectedWindow_eval_bindings.js | 18 +++++++------ devtools/client/webconsole/jsterm.js | 25 +++++++++++-------- 2 files changed, 25 insertions(+), 18 deletions(-) diff --git a/browser/components/extensions/test/browser/browser_ext_devtools_inspectedWindow_eval_bindings.js b/browser/components/extensions/test/browser/browser_ext_devtools_inspectedWindow_eval_bindings.js index e851215b430d3..25705d43738fa 100644 --- a/browser/components/extensions/test/browser/browser_ext_devtools_inspectedWindow_eval_bindings.js +++ b/browser/components/extensions/test/browser/browser_ext_devtools_inspectedWindow_eval_bindings.js @@ -123,15 +123,19 @@ add_task(async function test_devtools_inspectedWindow_eval_bindings() { await toolbox.once("split-console"); let jsterm = toolbox.getPanel("webconsole").hud.jsterm; - const options = await new Promise(resolve => { - jsterm.once("variablesview-open", (evt, view, options) => resolve(options)); + // Wait for the message to appear on the console. 
+ const messageNode = await new Promise(resolve => { + jsterm.hud.on("new-messages", function onThisMessage(e, messages) { + for (let m of messages) { + resolve(m.node); + jsterm.hud.off("new-messages", onThisMessage); + return; + } + }); }); - const objectType = options.objectActor.type; - const objectPreviewProperties = options.objectActor.preview.ownProperties; - is(objectType, "object", "The inspected object has the expected type"); - Assert.deepEqual(Object.keys(objectPreviewProperties), ["testkey"], - "The inspected object has the expected preview properties"); + let objectInspectors = [...messageNode.querySelectorAll(".tree")]; + is(objectInspectors.length, 1, "There is the expected number of object inspectors"); })(); const inspectJSObjectPromise = extension.awaitMessage(`inspectedWindow-eval-result`); diff --git a/devtools/client/webconsole/jsterm.js b/devtools/client/webconsole/jsterm.js index fed2fa82a740d..95bf01ca674a4 100644 --- a/devtools/client/webconsole/jsterm.js +++ b/devtools/client/webconsole/jsterm.js @@ -342,9 +342,7 @@ JSTerm.prototype = { this.clearHistory(); break; case "inspectObject": - if (!this.hud.NEW_CONSOLE_OUTPUT_ENABLED) { - this.inspectObjectActor(helperResult.object); - } + this.inspectObjectActor(helperResult.object); break; case "error": try { @@ -363,14 +361,9 @@ JSTerm.prototype = { } // Hide undefined results coming from JSTerm helper functions. 
- if (!errorMessage - && result - && typeof result == "object" - && result.type == "undefined" - && helperResult - && !helperHasRawOutput - && !(this.hud.NEW_CONSOLE_OUTPUT_ENABLED && helperResult.type === "inspectObject") - ) { + if (!errorMessage && result && typeof result == "object" && + result.type == "undefined" && + helperResult && !helperHasRawOutput) { callback && callback(); return; } @@ -409,6 +402,16 @@ JSTerm.prototype = { }, inspectObjectActor: function (objectActor) { + if (this.hud.NEW_CONSOLE_OUTPUT_ENABLED) { + this.hud.newConsoleOutput.dispatchMessageAdd({ + helperResult: { + type: "inspectObject", + object: objectActor + } + }, true); + return this.hud.newConsoleOutput; + } + return this.openVariablesView({ objectActor, label: VariablesView.getString(objectActor, {concise: true}), From d9edfa153c09955283498f39481843d55f0995d8 Mon Sep 17 00:00:00 2001 From: Kartikaya Gupta Date: Thu, 20 Jul 2017 14:14:25 -0400 Subject: [PATCH 002/152] Bug 1382744 - Make sure we process nested display items for layers-free webrender mode. 
r=ethlin MozReview-Commit-ID: mTox2irS1w --HG-- extra : rebase_source : 97cb2163aaf4a9f6df62688724df2ee051f55880 --- gfx/layers/wr/WebRenderLayerManager.cpp | 2 +- gfx/layers/wr/WebRenderLayerManager.h | 2 +- layout/painting/nsDisplayList.cpp | 20 ++++++++++++++++++++ layout/painting/nsDisplayList.h | 6 ++++++ 4 files changed, 28 insertions(+), 2 deletions(-) diff --git a/gfx/layers/wr/WebRenderLayerManager.cpp b/gfx/layers/wr/WebRenderLayerManager.cpp index 4d9c384aaaa21..ce93df44d49ee 100644 --- a/gfx/layers/wr/WebRenderLayerManager.cpp +++ b/gfx/layers/wr/WebRenderLayerManager.cpp @@ -191,7 +191,7 @@ PopulateScrollData(WebRenderScrollData& aTarget, Layer* aLayer) void WebRenderLayerManager::CreateWebRenderCommandsFromDisplayList(nsDisplayList* aDisplayList, nsDisplayListBuilder* aDisplayListBuilder, - StackingContextHelper& aSc, + const StackingContextHelper& aSc, wr::DisplayListBuilder& aBuilder) { nsDisplayList savedItems; diff --git a/gfx/layers/wr/WebRenderLayerManager.h b/gfx/layers/wr/WebRenderLayerManager.h index 41c7329e73623..db64bc1e51183 100644 --- a/gfx/layers/wr/WebRenderLayerManager.h +++ b/gfx/layers/wr/WebRenderLayerManager.h @@ -70,7 +70,7 @@ class WebRenderLayerManager final : public LayerManager nsDisplayListBuilder* aDisplayListBuilder); void CreateWebRenderCommandsFromDisplayList(nsDisplayList* aDisplayList, nsDisplayListBuilder* aDisplayListBuilder, - StackingContextHelper& aSc, + const StackingContextHelper& aSc, wr::DisplayListBuilder& aBuilder); void EndTransactionWithoutLayer(nsDisplayList* aDisplayList, nsDisplayListBuilder* aDisplayListBuilder); diff --git a/layout/painting/nsDisplayList.cpp b/layout/painting/nsDisplayList.cpp index 8331419087501..e922046fd9997 100644 --- a/layout/painting/nsDisplayList.cpp +++ b/layout/painting/nsDisplayList.cpp @@ -5772,6 +5772,26 @@ nsDisplayWrapList::SetReferenceFrame(const nsIFrame* aFrame) mToReferenceFrame = mFrame->GetOffsetToCrossDoc(mReferenceFrame); } +bool 
+nsDisplayWrapList::CreateWebRenderCommands(mozilla::wr::DisplayListBuilder& aBuilder, + const StackingContextHelper& aSc, + nsTArray& aParentCommands, + mozilla::layers::WebRenderLayerManager* aManager, + nsDisplayListBuilder* aDisplayListBuilder) +{ + // If this function is called in layers mode that means we created a + // WebRenderDisplayItemLayer for a display item that is a subclass of + // nsDisplayWrapList, but we didn't actually implement the overridden + // CreateWebRenderCommandsFromDisplayList on it. That doesn't seem correct. + MOZ_ASSERT(aManager->IsLayersFreeTransaction()); + + aManager->CreateWebRenderCommandsFromDisplayList(GetChildren(), + aDisplayListBuilder, + aSc, + aBuilder); + return true; +} + static nsresult WrapDisplayList(nsDisplayListBuilder* aBuilder, nsIFrame* aFrame, nsDisplayList* aList, nsDisplayWrapper* aWrapper) { diff --git a/layout/painting/nsDisplayList.h b/layout/painting/nsDisplayList.h index 3b2a6ee14f03b..1dc30078ad314 100644 --- a/layout/painting/nsDisplayList.h +++ b/layout/painting/nsDisplayList.h @@ -3870,6 +3870,12 @@ class nsDisplayWrapList : public nsDisplayItem { return nullptr; } + virtual bool CreateWebRenderCommands(mozilla::wr::DisplayListBuilder& aBuilder, + const StackingContextHelper& aSc, + nsTArray& aParentCommands, + mozilla::layers::WebRenderLayerManager* aManager, + nsDisplayListBuilder* aDisplayListBuilder) override; + protected: nsDisplayWrapList() {} From 02b7fcf7f57d1e7d5b7894dff073eab36b19ed6c Mon Sep 17 00:00:00 2001 From: Kartikaya Gupta Date: Thu, 20 Jul 2017 14:14:27 -0400 Subject: [PATCH 003/152] Bug 1382744 - Have nsDisplayTransform reuse nsDisplayWrapList::CreateWebRenderCommands. 
r=ethlin MozReview-Commit-ID: 5s3eECpao6s --HG-- extra : rebase_source : 8764b95bf564538245fd3bbacf27463ed08c08d6 --- layout/painting/nsDisplayList.cpp | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/layout/painting/nsDisplayList.cpp b/layout/painting/nsDisplayList.cpp index e922046fd9997..86683eaca69d9 100644 --- a/layout/painting/nsDisplayList.cpp +++ b/layout/painting/nsDisplayList.cpp @@ -7613,12 +7613,8 @@ nsDisplayTransform::CreateWebRenderCommands(mozilla::wr::DisplayListBuilder& aBu transformForSC, filters); - aManager->CreateWebRenderCommandsFromDisplayList(mStoredList.GetChildren(), - aDisplayListBuilder, - sc, - aBuilder); - - return true; + return mStoredList.CreateWebRenderCommands(aBuilder, sc, aParentCommands, + aManager, aDisplayListBuilder); } already_AddRefed nsDisplayTransform::BuildLayer(nsDisplayListBuilder *aBuilder, From 408e40547b80282f109ae6dd5d5fb51c03d60118 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fernando=20Jim=C3=A9nez=20Moreno?= Date: Thu, 20 Jul 2017 11:34:35 -0700 Subject: [PATCH 004/152] servo: Merge #17256 - Implement basic Time To First Paint and First Contentful Paint PWMs (from ferjm:ttfp); r=jdm - [X] `./mach build -d` does not report any errors - [X] `./mach test-tidy` does not report any errors Source-Repo: https://github.com/servo/servo Source-Revision: eba573d774dd2ac07ec8d62f1ad8deffca4667a4 --HG-- extra : subtree_source : https%3A//hg.mozilla.org/projects/converted-servo-linear extra : subtree_revision : 3ec7d47692901d4f9fd7e9de576fc7ea03600760 --- servo/Cargo.lock | 16 +++ servo/components/config/opts.rs | 6 + servo/components/constellation/Cargo.toml | 1 + servo/components/constellation/lib.rs | 1 + servo/components/constellation/pipeline.rs | 7 +- servo/components/layout_thread/Cargo.toml | 1 + servo/components/layout_thread/lib.rs | 36 +++++- servo/components/layout_traits/Cargo.toml | 1 + servo/components/layout_traits/lib.rs | 5 +- servo/components/metrics/Cargo.toml | 16 +++ 
servo/components/metrics/lib.rs | 113 ++++++++++++++++++ servo/components/profile/time.rs | 2 + servo/components/profile_traits/time.rs | 2 + servo/components/script/Cargo.toml | 1 + servo/components/script/lib.rs | 1 + servo/components/script/script_thread.rs | 6 +- .../script_layout_interface/Cargo.toml | 1 + .../components/script_layout_interface/lib.rs | 1 + .../script_layout_interface/message.rs | 5 + servo/components/script_traits/Cargo.toml | 1 + 20 files changed, 213 insertions(+), 10 deletions(-) create mode 100644 servo/components/metrics/Cargo.toml create mode 100644 servo/components/metrics/lib.rs diff --git a/servo/Cargo.lock b/servo/Cargo.lock index 02cdc75b2e2f6..b2a05f3be0fd4 100644 --- a/servo/Cargo.lock +++ b/servo/Cargo.lock @@ -522,6 +522,7 @@ dependencies = [ "itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)", "layout_traits 0.0.1", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "metrics 0.0.1", "msg 0.0.1", "net 0.0.1", "net_traits 0.0.1", @@ -1501,6 +1502,7 @@ dependencies = [ "layout_traits 0.0.1", "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "metrics 0.0.1", "msg 0.0.1", "net_traits 0.0.1", "parking_lot 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1527,6 +1529,7 @@ version = "0.0.1" dependencies = [ "gfx 0.0.1", "ipc-channel 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "metrics 0.0.1", "msg 0.0.1", "net_traits 0.0.1", "profile_traits 0.0.1", @@ -1694,6 +1697,16 @@ dependencies = [ "toml 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "metrics" +version = "0.0.1" +dependencies = [ + "gfx 0.0.1", + "profile_traits 0.0.1", + "servo_config 0.0.1", + "time 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "mime" version = "0.2.4" @@ -2453,6 +2466,7 @@ dependencies = [ 
"lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "metrics 0.0.1", "mime 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", "mime_guess 1.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "msg 0.0.1", @@ -2512,6 +2526,7 @@ dependencies = [ "ipc-channel 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "metrics 0.0.1", "msg 0.0.1", "net_traits 0.0.1", "profile_traits 0.0.1", @@ -2556,6 +2571,7 @@ dependencies = [ "hyper_serde 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "ipc-channel 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", + "metrics 0.0.1", "msg 0.0.1", "net_traits 0.0.1", "offscreen_gl_context 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/servo/components/config/opts.rs b/servo/components/config/opts.rs index b03ed69c17b05..e94eef2772c8f 100644 --- a/servo/components/config/opts.rs +++ b/servo/components/config/opts.rs @@ -224,6 +224,9 @@ pub struct Opts { /// Unminify Javascript. pub unminify_js: bool, + + /// Print Progressive Web Metrics to console. 
+ pub print_pwm: bool, } fn print_usage(app: &str, opts: &Options) { @@ -544,6 +547,7 @@ pub fn default_opts() -> Opts { signpost: false, certificate_path: None, unminify_js: false, + print_pwm: false, } } @@ -608,6 +612,7 @@ pub fn from_cmdline_args(args: &[String]) -> ArgumentParsingResult { opts.optopt("", "profiler-db-user", "Profiler database user", ""); opts.optopt("", "profiler-db-pass", "Profiler database password", ""); opts.optopt("", "profiler-db-name", "Profiler database name", ""); + opts.optflag("", "print-pwm", "Print Progressive Web Metrics"); let opt_match = match opts.parse(args) { Ok(m) => m, @@ -843,6 +848,7 @@ pub fn from_cmdline_args(args: &[String]) -> ArgumentParsingResult { signpost: debug_options.signpost, certificate_path: opt_match.opt_str("certificate-path"), unminify_js: opt_match.opt_present("unminify-js"), + print_pwm: opt_match.opt_present("print-pwm"), }; set_defaults(opts); diff --git a/servo/components/constellation/Cargo.toml b/servo/components/constellation/Cargo.toml index 499a4c2dd0574..ff6bd2700f0b8 100644 --- a/servo/components/constellation/Cargo.toml +++ b/servo/components/constellation/Cargo.toml @@ -26,6 +26,7 @@ ipc-channel = "0.8" itertools = "0.5" layout_traits = {path = "../layout_traits"} log = "0.3.5" +metrics = {path = "../metrics"} msg = {path = "../msg"} net = {path = "../net"} net_traits = {path = "../net_traits"} diff --git a/servo/components/constellation/lib.rs b/servo/components/constellation/lib.rs index ec0c513dace39..d2ac0f00dead9 100644 --- a/servo/components/constellation/lib.rs +++ b/servo/components/constellation/lib.rs @@ -26,6 +26,7 @@ extern crate itertools; extern crate layout_traits; #[macro_use] extern crate log; +extern crate metrics; extern crate msg; extern crate net; extern crate net_traits; diff --git a/servo/components/constellation/pipeline.rs b/servo/components/constellation/pipeline.rs index 919c6203fa78c..d6571c208c967 100644 --- a/servo/components/constellation/pipeline.rs +++ 
b/servo/components/constellation/pipeline.rs @@ -14,6 +14,7 @@ use ipc_channel::Error; use ipc_channel::ipc::{self, IpcReceiver, IpcSender}; use ipc_channel::router::ROUTER; use layout_traits::LayoutThreadFactory; +use metrics::PaintTimeMetrics; use msg::constellation_msg::{BrowsingContextId, TopLevelBrowsingContextId, FrameType, PipelineId, PipelineNamespaceId}; use net::image_cache::ImageCacheImpl; use net_traits::{IpcSend, ResourceThreads}; @@ -471,6 +472,7 @@ impl UnprivilegedPipelineContent { STF: ScriptThreadFactory { let image_cache = Arc::new(ImageCacheImpl::new(self.webrender_api_sender.create_api())); + let paint_time_metrics = PaintTimeMetrics::new(self.time_profiler_chan.clone()); let layout_pair = STF::create(InitialScriptState { id: self.id, browsing_context_id: self.browsing_context_id, @@ -490,7 +492,7 @@ impl UnprivilegedPipelineContent { window_size: self.window_size, pipeline_namespace_id: self.pipeline_namespace_id, content_process_shutdown_chan: self.script_content_process_shutdown_chan, - webvr_thread: self.webvr_thread + webvr_thread: self.webvr_thread, }, self.load_data.clone()); LTF::create(self.id, @@ -508,7 +510,8 @@ impl UnprivilegedPipelineContent { Some(self.layout_content_process_shutdown_chan), self.webrender_api_sender, self.prefs.get("layout.threads").expect("exists").value() - .as_u64().expect("count") as usize); + .as_u64().expect("count") as usize, + paint_time_metrics); if wait_for_completion { let _ = self.script_content_process_shutdown_port.recv(); diff --git a/servo/components/layout_thread/Cargo.toml b/servo/components/layout_thread/Cargo.toml index 883205bca281d..de3fd19e22491 100644 --- a/servo/components/layout_thread/Cargo.toml +++ b/servo/components/layout_thread/Cargo.toml @@ -23,6 +23,7 @@ layout = {path = "../layout"} layout_traits = {path = "../layout_traits"} lazy_static = "0.2" log = "0.3.5" +metrics = {path = "../metrics"} msg = {path = "../msg"} net_traits = {path = "../net_traits"} parking_lot = {version = 
"0.4", features = ["nightly"]} diff --git a/servo/components/layout_thread/lib.rs b/servo/components/layout_thread/lib.rs index f8ff4af9cfa05..5faf114740bb8 100644 --- a/servo/components/layout_thread/lib.rs +++ b/servo/components/layout_thread/lib.rs @@ -27,6 +27,7 @@ extern crate layout_traits; extern crate lazy_static; #[macro_use] extern crate log; +extern crate metrics; extern crate msg; extern crate net_traits; extern crate parking_lot; @@ -83,6 +84,7 @@ use layout::traversal::{ComputeAbsolutePositions, RecalcStyleAndConstructFlows}; use layout::webrender_helpers::WebRenderDisplayListConverter; use layout::wrapper::LayoutNodeLayoutData; use layout_traits::LayoutThreadFactory; +use metrics::{PaintTimeMetrics, ProfilerMetadataFactory}; use msg::constellation_msg::PipelineId; use msg::constellation_msg::TopLevelBrowsingContextId; use net_traits::image_cache::{ImageCache, UsePlaceholder}; @@ -248,7 +250,10 @@ pub struct LayoutThread { layout_threads: usize, /// Which quirks mode are we rendering the document in? - quirks_mode: Option + quirks_mode: Option, + + /// Paint time metrics. 
+ paint_time_metrics: PaintTimeMetrics, } impl LayoutThreadFactory for LayoutThread { @@ -269,7 +274,8 @@ impl LayoutThreadFactory for LayoutThread { mem_profiler_chan: mem::ProfilerChan, content_process_shutdown_chan: Option>, webrender_api_sender: webrender_api::RenderApiSender, - layout_threads: usize) { + layout_threads: usize, + paint_time_metrics: PaintTimeMetrics) { thread::Builder::new().name(format!("LayoutThread {:?}", id)).spawn(move || { thread_state::initialize(thread_state::LAYOUT); @@ -291,7 +297,8 @@ impl LayoutThreadFactory for LayoutThread { time_profiler_chan, mem_profiler_chan.clone(), webrender_api_sender, - layout_threads); + layout_threads, + paint_time_metrics); let reporter_name = format!("layout-reporter-{}", id); mem_profiler_chan.run_with_memory_reporting(|| { @@ -452,7 +459,8 @@ impl LayoutThread { time_profiler_chan: time::ProfilerChan, mem_profiler_chan: mem::ProfilerChan, webrender_api_sender: webrender_api::RenderApiSender, - layout_threads: usize) + layout_threads: usize, + paint_time_metrics: PaintTimeMetrics) -> LayoutThread { let device = Device::new( MediaType::Screen, @@ -551,6 +559,7 @@ impl LayoutThread { }, layout_threads: layout_threads, quirks_mode: None, + paint_time_metrics: paint_time_metrics, } } @@ -733,7 +742,10 @@ impl LayoutThread { debug!("layout: ExitNow received"); self.exit_now(); return false - } + }, + Msg::SetNavigationStart(time) => { + self.paint_time_metrics.set_navigation_start(time); + }, } true @@ -785,7 +797,8 @@ impl LayoutThread { self.mem_profiler_chan.clone(), info.content_process_shutdown_chan, self.webrender_api.clone_sender(), - info.layout_threads); + info.layout_threads, + info.paint_time_metrics); } /// Enters a quiescent state in which no new messages will be processed until an `ExitNow` is @@ -1020,6 +1033,12 @@ impl LayoutThread { self.epoch.set(epoch); let viewport_size = webrender_api::LayoutSize::from_untyped(&viewport_size); + + // Set paint metrics if needed right before sending the 
display list to WebRender. + // XXX At some point, we may want to set this metric from WebRender itself. + self.paint_time_metrics.maybe_set_first_paint(self); + self.paint_time_metrics.maybe_set_first_contentful_paint(self, &display_list); + self.webrender_api.set_display_list( Some(get_root_flow_background_color(layout_root)), webrender_api::Epoch(epoch.0), @@ -1655,6 +1674,11 @@ impl LayoutThread { } } +impl ProfilerMetadataFactory for LayoutThread { + fn new_metadata(&self) -> Option { + self.profiler_metadata() + } +} // The default computed value for background-color is transparent (see // http://dev.w3.org/csswg/css-backgrounds/#background-color). However, we diff --git a/servo/components/layout_traits/Cargo.toml b/servo/components/layout_traits/Cargo.toml index 0368f1058bdab..6a6824463d421 100644 --- a/servo/components/layout_traits/Cargo.toml +++ b/servo/components/layout_traits/Cargo.toml @@ -12,6 +12,7 @@ path = "lib.rs" [dependencies] gfx = {path = "../gfx"} ipc-channel = "0.8" +metrics = {path = "../metrics"} msg = {path = "../msg"} net_traits = {path = "../net_traits"} profile_traits = {path = "../profile_traits"} diff --git a/servo/components/layout_traits/lib.rs b/servo/components/layout_traits/lib.rs index eced3137e1092..4b906e4c1c532 100644 --- a/servo/components/layout_traits/lib.rs +++ b/servo/components/layout_traits/lib.rs @@ -6,6 +6,7 @@ extern crate gfx; extern crate ipc_channel; +extern crate metrics; extern crate msg; extern crate net_traits; extern crate profile_traits; @@ -20,6 +21,7 @@ extern crate webrender_api; use gfx::font_cache_thread::FontCacheThread; use ipc_channel::ipc::{IpcReceiver, IpcSender}; +use metrics::PaintTimeMetrics; use msg::constellation_msg::PipelineId; use msg::constellation_msg::TopLevelBrowsingContextId; use net_traits::image_cache::ImageCache; @@ -48,5 +50,6 @@ pub trait LayoutThreadFactory { mem_profiler_chan: mem::ProfilerChan, content_process_shutdown_chan: Option>, webrender_api_sender: 
webrender_api::RenderApiSender, - layout_threads: usize); + layout_threads: usize, + paint_time_metrics: PaintTimeMetrics); } diff --git a/servo/components/metrics/Cargo.toml b/servo/components/metrics/Cargo.toml new file mode 100644 index 0000000000000..a049a3eb0c013 --- /dev/null +++ b/servo/components/metrics/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "metrics" +version = "0.0.1" +authors = ["The Servo Project Developers"] +license = "MPL-2.0" +publish = false + +[lib] +name = "metrics" +path = "lib.rs" + +[dependencies] +gfx = {path = "../gfx"} +profile_traits = {path = "../profile_traits"} +servo_config = {path = "../config"} +time = "0.1.12" diff --git a/servo/components/metrics/lib.rs b/servo/components/metrics/lib.rs new file mode 100644 index 0000000000000..98773a1d20566 --- /dev/null +++ b/servo/components/metrics/lib.rs @@ -0,0 +1,113 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +extern crate gfx; +extern crate profile_traits; +extern crate servo_config; +extern crate time; + +use gfx::display_list::{DisplayItem, DisplayList}; +use profile_traits::time::{ProfilerChan, ProfilerCategory, send_profile_data}; +use profile_traits::time::TimerMetadata; +use servo_config::opts; +use std::cell::Cell; + +pub trait ProfilerMetadataFactory { + fn new_metadata(&self) -> Option; +} + +macro_rules! make_time_setter( + ( $attr:ident, $func:ident, $category:ident, $label:expr ) => ( + fn $func(&self, profiler_metadata_factory: &T) + where T: ProfilerMetadataFactory { + let navigation_start = match self.navigation_start { + Some(time) => time, + None => { + println!("Trying to set metric before navigation start"); + return; + } + }; + + let now = time::precise_time_ns() as f64; + let time = now - navigation_start; + self.$attr.set(Some(time)); + + // Send the metric to the time profiler. 
+ send_profile_data(ProfilerCategory::$category, + profiler_metadata_factory.new_metadata(), + &self.time_profiler_chan, + time as u64, time as u64, 0, 0); + + // Print the metric to console if the print-pwm option was given. + if opts::get().print_pwm { + println!("{:?} {:?}", $label, time); + } + } + ); +); + +pub struct PaintTimeMetrics { + navigation_start: Option, + first_paint: Cell>, + first_contentful_paint: Cell>, + time_profiler_chan: ProfilerChan, +} + +impl PaintTimeMetrics { + pub fn new(time_profiler_chan: ProfilerChan) + -> PaintTimeMetrics { + PaintTimeMetrics { + navigation_start: None, + first_paint: Cell::new(None), + first_contentful_paint: Cell::new(None), + time_profiler_chan: time_profiler_chan, + } + } + + pub fn set_navigation_start(&mut self, time: f64) { + self.navigation_start = Some(time); + } + + make_time_setter!(first_paint, set_first_paint, + TimeToFirstPaint, + "first-paint"); + make_time_setter!(first_contentful_paint, set_first_contentful_paint, + TimeToFirstContentfulPaint, + "first-contentful-paint"); + + pub fn maybe_set_first_paint(&self, profiler_metadata_factory: &T) + where T: ProfilerMetadataFactory { + { + if self.first_paint.get().is_some() { + return; + } + } + + self.set_first_paint(profiler_metadata_factory); + } + + pub fn maybe_set_first_contentful_paint(&self, profiler_metadata_factory: &T, + display_list: &DisplayList) + where T: ProfilerMetadataFactory { + { + if self.first_contentful_paint.get().is_some() { + return; + } + } + + // Analyze display list to figure out if this is the first contentful + // paint (i.e. 
the display list contains items of type text, image, + // non-white canvas or SVG) + for item in &display_list.list { + match item { + &DisplayItem::Text(_) | + &DisplayItem::Image(_) => { + self.set_first_contentful_paint(profiler_metadata_factory); + return; + }, + _ => (), + } + } + } +} diff --git a/servo/components/profile/time.rs b/servo/components/profile/time.rs index cc94dd9eb4840..16495b63d43af 100644 --- a/servo/components/profile/time.rs +++ b/servo/components/profile/time.rs @@ -154,6 +154,8 @@ impl Formattable for ProfilerCategory { ProfilerCategory::ScriptExitFullscreen => "Script Exit Fullscreen", ProfilerCategory::ScriptWebVREvent => "Script WebVR Event", ProfilerCategory::ScriptWorkletEvent => "Script Worklet Event", + ProfilerCategory::TimeToFirstPaint => "Time To First Paint", + ProfilerCategory::TimeToFirstContentfulPaint => "Time To First Contentful Paint", ProfilerCategory::ApplicationHeartbeat => "Application Heartbeat", }; format!("{}{}", padding, name) diff --git a/servo/components/profile_traits/time.rs b/servo/components/profile_traits/time.rs index 404a2340c2fd5..c883c1f83365c 100644 --- a/servo/components/profile_traits/time.rs +++ b/servo/components/profile_traits/time.rs @@ -90,6 +90,8 @@ pub enum ProfilerCategory { ScriptExitFullscreen = 0x78, ScriptWebVREvent = 0x79, ScriptWorkletEvent = 0x7a, + TimeToFirstPaint = 0x80, + TimeToFirstContentfulPaint = 0x81, ApplicationHeartbeat = 0x90, } diff --git a/servo/components/script/Cargo.toml b/servo/components/script/Cargo.toml index c98f11e90aceb..b183f804e9541 100644 --- a/servo/components/script/Cargo.toml +++ b/servo/components/script/Cargo.toml @@ -56,6 +56,7 @@ jstraceable_derive = {path = "../jstraceable_derive"} lazy_static = "0.2" libc = "0.2" log = "0.3.5" +metrics = {path = "../metrics"} mime = "0.2.1" mime_guess = "1.8.0" msg = {path = "../msg"} diff --git a/servo/components/script/lib.rs b/servo/components/script/lib.rs index d5d4aef293b9f..e1a1ab592cb7b 100644 --- 
a/servo/components/script/lib.rs +++ b/servo/components/script/lib.rs @@ -64,6 +64,7 @@ extern crate lazy_static; extern crate libc; #[macro_use] extern crate log; +extern crate metrics; #[macro_use] extern crate mime; extern crate mime_guess; diff --git a/servo/components/script/script_thread.rs b/servo/components/script/script_thread.rs index 6f811c8be67c3..e87938bfa1e01 100644 --- a/servo/components/script/script_thread.rs +++ b/servo/components/script/script_thread.rs @@ -71,6 +71,7 @@ use js::jsapi::{JSTracer, SetWindowProxyClass}; use js::jsval::UndefinedValue; use js::rust::Runtime; use mem::heap_size_of_self_and_children; +use metrics::PaintTimeMetrics; use microtask::{MicrotaskQueue, Microtask}; use msg::constellation_msg::{BrowsingContextId, FrameType, PipelineId, PipelineNamespace, TopLevelBrowsingContextId}; use net_traits::{FetchMetadata, FetchResponseListener, FetchResponseMsg}; @@ -176,6 +177,8 @@ impl InProgressLoad { url: ServoUrl, origin: MutableOrigin) -> InProgressLoad { let current_time = get_time(); + let navigation_start_precise = precise_time_ns() as f64; + layout_chan.send(message::Msg::SetNavigationStart(navigation_start_precise)).unwrap(); InProgressLoad { pipeline_id: id, browsing_context_id: browsing_context_id, @@ -188,7 +191,7 @@ impl InProgressLoad { url: url, origin: origin, navigation_start: (current_time.sec * 1000 + current_time.nsec as i64 / 1000000) as u64, - navigation_start_precise: precise_time_ns() as f64, + navigation_start_precise: navigation_start_precise, } } } @@ -1453,6 +1456,7 @@ impl ScriptThread { image_cache: self.image_cache.clone(), content_process_shutdown_chan: content_process_shutdown_chan, layout_threads: layout_threads, + paint_time_metrics: PaintTimeMetrics::new(self.time_profiler_chan.clone()), }); // Pick a layout thread, any layout thread diff --git a/servo/components/script_layout_interface/Cargo.toml b/servo/components/script_layout_interface/Cargo.toml index a0db721012a39..6e566488f65cb 100644 --- 
a/servo/components/script_layout_interface/Cargo.toml +++ b/servo/components/script_layout_interface/Cargo.toml @@ -22,6 +22,7 @@ html5ever = "0.18" ipc-channel = "0.8" libc = "0.2" log = "0.3.5" +metrics = {path = "../metrics"} msg = {path = "../msg"} net_traits = {path = "../net_traits"} profile_traits = {path = "../profile_traits"} diff --git a/servo/components/script_layout_interface/lib.rs b/servo/components/script_layout_interface/lib.rs index 210bb1a5c11a4..e3e956096bb6b 100644 --- a/servo/components/script_layout_interface/lib.rs +++ b/servo/components/script_layout_interface/lib.rs @@ -24,6 +24,7 @@ extern crate ipc_channel; extern crate libc; #[macro_use] extern crate log; +extern crate metrics; extern crate msg; extern crate net_traits; extern crate profile_traits; diff --git a/servo/components/script_layout_interface/message.rs b/servo/components/script_layout_interface/message.rs index bd68eaec7a630..9c8a3fbbd477f 100644 --- a/servo/components/script_layout_interface/message.rs +++ b/servo/components/script_layout_interface/message.rs @@ -7,6 +7,7 @@ use app_units::Au; use euclid::{Point2D, Rect}; use gfx_traits::Epoch; use ipc_channel::ipc::{IpcReceiver, IpcSender}; +use metrics::PaintTimeMetrics; use msg::constellation_msg::PipelineId; use net_traits::image_cache::ImageCache; use profile_traits::mem::ReportsChan; @@ -89,6 +90,9 @@ pub enum Msg { /// Tells layout that script has added some paint worklet modules. RegisterPaint(Atom, Vec, Arc), + + /// Send to layout the precise time when the navigation started. 
+ SetNavigationStart(f64), } @@ -158,4 +162,5 @@ pub struct NewLayoutThreadInfo { pub image_cache: Arc, pub content_process_shutdown_chan: Option>, pub layout_threads: usize, + pub paint_time_metrics: PaintTimeMetrics, } diff --git a/servo/components/script_traits/Cargo.toml b/servo/components/script_traits/Cargo.toml index 97790b822306d..8a8965ce3e3a6 100644 --- a/servo/components/script_traits/Cargo.toml +++ b/servo/components/script_traits/Cargo.toml @@ -23,6 +23,7 @@ hyper = "0.10" hyper_serde = "0.7" ipc-channel = "0.8" libc = "0.2" +metrics = {path = "../metrics"} msg = {path = "../msg"} net_traits = {path = "../net_traits"} offscreen_gl_context = { version = "0.11", features = ["serde"] } From f3248efb0d88c8118a0f841549bf02f909e51d1a Mon Sep 17 00:00:00 2001 From: Shane Tomlinson Date: Thu, 20 Jul 2017 10:35:47 +0100 Subject: [PATCH 005/152] Fix the private browsing mode check in FxAccountsWebChannel.jsm (bug 1378766) r=eoger,markh The private browsing mode check reached into the sendingContext's browser's docShell for its check, the Law of Demeter was shattered. PrivateBrowsingUtils.jsm provides all the functionality needed for the check, just call PrivateBrowsingUtils.isBrowserPrivate with the sendingContext's browser. 
MozReview-Commit-ID: DRIU1fy94ml *** Bug 1378766 - Remove the `sendingContext.browser` defined check MozReview-Commit-ID: GWFFggOoItP --HG-- extra : rebase_source : 1c99ee2ea2cceee185ba20ff54e6e67f86f10766 --- services/fxaccounts/FxAccountsWebChannel.jsm | 10 ++--- .../tests/xpcshell/test_web_channel.js | 39 ++++++++++++++----- 2 files changed, 35 insertions(+), 14 deletions(-) diff --git a/services/fxaccounts/FxAccountsWebChannel.jsm b/services/fxaccounts/FxAccountsWebChannel.jsm index b45e6773982f5..a4ad0ff7bf454 100644 --- a/services/fxaccounts/FxAccountsWebChannel.jsm +++ b/services/fxaccounts/FxAccountsWebChannel.jsm @@ -24,6 +24,8 @@ XPCOMUtils.defineLazyModuleGetter(this, "fxAccounts", "resource://gre/modules/FxAccounts.jsm"); XPCOMUtils.defineLazyModuleGetter(this, "FxAccountsStorageManagerCanStoreField", "resource://gre/modules/FxAccountsStorage.jsm"); +XPCOMUtils.defineLazyModuleGetter(this, "PrivateBrowsingUtils", + "resource://gre/modules/PrivateBrowsingUtils.jsm"); XPCOMUtils.defineLazyModuleGetter(this, "Weave", "resource://services-sync/main.js"); @@ -261,6 +263,7 @@ this.FxAccountsWebChannelHelpers = function(options) { options = options || {}; this._fxAccounts = options.fxAccounts || fxAccounts; + this._privateBrowsingUtils = options.privateBrowsingUtils || PrivateBrowsingUtils; }; this.FxAccountsWebChannelHelpers.prototype = { @@ -343,15 +346,12 @@ this.FxAccountsWebChannelHelpers.prototype = { * Check if `sendingContext` is in private browsing mode. 
*/ isPrivateBrowsingMode(sendingContext) { - if (!sendingContext || - !sendingContext.browser || - !sendingContext.browser.docShell || - sendingContext.browser.docShell.usePrivateBrowsing === undefined) { + if (!sendingContext) { log.error("Unable to check for private browsing mode, assuming true"); return true; } - const isPrivateBrowsing = sendingContext.browser.docShell.usePrivateBrowsing; + const isPrivateBrowsing = this._privateBrowsingUtils.isBrowserPrivate(sendingContext.browser); log.debug("is private browsing", isPrivateBrowsing); return isPrivateBrowsing; }, diff --git a/services/fxaccounts/tests/xpcshell/test_web_channel.js b/services/fxaccounts/tests/xpcshell/test_web_channel.js index 2430c103cd684..033b79da35570 100644 --- a/services/fxaccounts/tests/xpcshell/test_web_channel.js +++ b/services/fxaccounts/tests/xpcshell/test_web_channel.js @@ -10,11 +10,7 @@ const { FxAccountsWebChannel, FxAccountsWebChannelHelpers } = const URL_STRING = "https://example.com"; const mockSendingContext = { - browser: { - docShell: { - usePrivateBrowsing: false - } - }, + browser: {}, principal: {}, eventTarget: {} }; @@ -487,6 +483,9 @@ add_task(async function test_helpers_getFxAStatus_extra_engines() { verified: true }); } + }, + privateBrowsingUtils: { + isBrowserPrivate: () => true } }); @@ -720,19 +719,41 @@ add_task(async function test_helpers_shouldAllowFxaStatus_no_service_private_bro }); add_task(async function test_helpers_isPrivateBrowsingMode_private_browsing() { - let helpers = new FxAccountsWebChannelHelpers({}); - mockSendingContext.browser.docShell.usePrivateBrowsing = true; + let wasCalled = { + isBrowserPrivate: false + }; + let helpers = new FxAccountsWebChannelHelpers({ + privateBrowsingUtils: { + isBrowserPrivate(browser) { + wasCalled.isBrowserPrivate = true; + do_check_eq(browser, mockSendingContext.browser); + return true; + } + } + }); let isPrivateBrowsingMode = helpers.isPrivateBrowsingMode(mockSendingContext); 
do_check_true(isPrivateBrowsingMode); + do_check_true(wasCalled.isBrowserPrivate); }); add_task(async function test_helpers_isPrivateBrowsingMode_private_browsing() { - let helpers = new FxAccountsWebChannelHelpers({}); - mockSendingContext.browser.docShell.usePrivateBrowsing = false; + let wasCalled = { + isBrowserPrivate: false + }; + let helpers = new FxAccountsWebChannelHelpers({ + privateBrowsingUtils: { + isBrowserPrivate(browser) { + wasCalled.isBrowserPrivate = true; + do_check_eq(browser, mockSendingContext.browser); + return false; + } + } + }); let isPrivateBrowsingMode = helpers.isPrivateBrowsingMode(mockSendingContext); do_check_false(isPrivateBrowsingMode); + do_check_true(wasCalled.isBrowserPrivate); }); add_task(async function test_helpers_change_password() { From 1e19736e16970229f1c6e0d29a91c2e4829be6cc Mon Sep 17 00:00:00 2001 From: Zibi Braniecki Date: Tue, 30 May 2017 10:30:26 +0200 Subject: [PATCH 006/152] Bug 1362617 - Generalize MOZ_CHROME_MULTILOCALE to work for browser as well. 
r=gps,ted In order to get Firefox to work with multiple built-in languages, we want to bundle the same file in desktop as we do in fennec - res/multilocale.json MozReview-Commit-ID: BY1cMYd0q8Q --HG-- extra : rebase_source : 4bc95d0055a98869481324654d9e91bdf0734894 --- .../static/browser_all_files_referenced.js | 3 +++ browser/installer/Makefile.in | 2 ++ browser/installer/package-manifest.in | 6 +++-- mobile/android/installer/Makefile.in | 25 +---------------- mobile/android/installer/package-manifest.in | 2 -- toolkit/locales/Makefile.in | 27 +++++++++++++++++++ 6 files changed, 37 insertions(+), 28 deletions(-) diff --git a/browser/base/content/test/static/browser_all_files_referenced.js b/browser/base/content/test/static/browser_all_files_referenced.js index 062a4c966eeea..4249bf5b66a2b 100644 --- a/browser/base/content/test/static/browser_all_files_referenced.js +++ b/browser/base/content/test/static/browser_all_files_referenced.js @@ -99,6 +99,9 @@ var whitelist = [ {file: "resource://gre/modules/ClusterLib.js"}, {file: "resource://gre/modules/ColorConversion.js"}, + // List of built-in locales. See bug 1362617 for details. + {file: "resource://gre/res/multilocale.json"}, + // The l10n build system can't package string files only for some platforms. 
{file: "resource://gre/chrome/en-US/locale/en-US/global-platform/mac/accessible.properties", platforms: ["linux", "win"]}, diff --git a/browser/installer/Makefile.in b/browser/installer/Makefile.in index a888171ddbd9e..131291b4a5905 100644 --- a/browser/installer/Makefile.in +++ b/browser/installer/Makefile.in @@ -12,6 +12,8 @@ MOZ_PKG_REMOVALS = $(srcdir)/removed-files.in MOZ_PKG_MANIFEST = $(srcdir)/package-manifest.in MOZ_PKG_DUPEFLAGS = -f $(srcdir)/allowed-dupes.mn +DEFINES += -DPKG_LOCALE_MANIFEST=$(topobjdir)/toolkit/locales/locale-manifest.in + # Some files have been already bundled with xulrunner ifndef MOZ_MULET MOZ_PKG_FATAL_WARNINGS = 1 diff --git a/browser/installer/package-manifest.in b/browser/installer/package-manifest.in index 12e7d31a2402e..20245663a4ad2 100644 --- a/browser/installer/package-manifest.in +++ b/browser/installer/package-manifest.in @@ -47,8 +47,6 @@ [@AB_CD@] @RESPATH@/browser/chrome/@AB_CD@@JAREXT@ @RESPATH@/browser/chrome/@AB_CD@.manifest -@RESPATH@/chrome/@AB_CD@@JAREXT@ -@RESPATH@/chrome/@AB_CD@.manifest @RESPATH@/dictionaries/* #if defined(XP_WIN) || defined(XP_LINUX) @RESPATH@/fonts/* @@ -837,3 +835,7 @@ bin/libfreebl_32int64_3.so @RESPATH@/fix_linux_stack.py #endif #endif + +#ifdef PKG_LOCALE_MANIFEST +#include @PKG_LOCALE_MANIFEST@ +#endif diff --git a/mobile/android/installer/Makefile.in b/mobile/android/installer/Makefile.in index 7905a3c109c57..34dc8f09bd211 100644 --- a/mobile/android/installer/Makefile.in +++ b/mobile/android/installer/Makefile.in @@ -19,11 +19,7 @@ MOZ_PKG_REMOVALS = $(srcdir)/removed-files.in MOZ_PKG_MANIFEST = $(srcdir)/package-manifest.in MOZ_PKG_DUPEFLAGS = -f $(srcdir)/allowed-dupes.mn -ifdef MOZ_CHROME_MULTILOCALE -MOZ_PKG_MANIFEST_DEPS = locale-manifest.in - -DEFINES += -DPKG_LOCALE_MANIFEST=$(CURDIR)/locale-manifest.in -endif +DEFINES += -DPKG_LOCALE_MANIFEST=$(topobjdir)/toolkit/locales/locale-manifest.in DEFINES += \ -DMOZ_APP_NAME=$(MOZ_APP_NAME) \ @@ -84,22 +80,3 @@ DEFINES += 
-DBINPATH=$(BINPATH) ifdef ENABLE_MARIONETTE DEFINES += -DENABLE_MARIONETTE=1 endif - - -ifdef MOZ_CHROME_MULTILOCALE -# When MOZ_CHROME_MULTILOCALE is defined, we write multilocale.json like: -# {"locales": ["en-US", "de", "ar", ...]} -locale-manifest.in: $(GLOBAL_DEPS) FORCE - printf '\n[multilocale]\n' > $@ - printf '@BINPATH@/res/multilocale.json\n' >> $@ - for LOCALE in en-US $(MOZ_CHROME_MULTILOCALE) ;\ - do \ - printf '$(BINPATH)/chrome/'"$$LOCALE"'$(JAREXT)\n' >> $@; \ - printf '$(BINPATH)/chrome/'"$$LOCALE"'.manifest\n' >> $@; \ - done - COMMA=, - echo '{"locales": [$(foreach l,$(MOZ_CHROME_MULTILOCALE),"$(l)"$(COMMA)) "en-US"]}' \ - > $(FINAL_TARGET)/res/multilocale.json - -GARBAGE += locale-manifest.in -endif diff --git a/mobile/android/installer/package-manifest.in b/mobile/android/installer/package-manifest.in index 187aac21cf9be..a132477041489 100644 --- a/mobile/android/installer/package-manifest.in +++ b/mobile/android/installer/package-manifest.in @@ -15,8 +15,6 @@ #filter substitution [@AB_CD@] -@BINPATH@/chrome/@AB_CD@@JAREXT@ -@BINPATH@/chrome/@AB_CD@.manifest @BINPATH@/@PREF_DIR@/mobile-l10n.js @BINPATH@/update.locale #ifdef MOZ_UPDATER diff --git a/toolkit/locales/Makefile.in b/toolkit/locales/Makefile.in index e20128611073b..559515f2bf478 100644 --- a/toolkit/locales/Makefile.in +++ b/toolkit/locales/Makefile.in @@ -38,3 +38,30 @@ else $(SYSINSTALL) $(IFLAGS1) $^ $(FINAL_TARGET) endif endif + +libs:: locale-manifest.in + +MOZ_CHROME_MULTILOCALE?=en-US + +# Firefox uses @RESPATH@. +# Fennec uses @BINPATH@ and doesn't have the @RESPATH@ variable defined. 
+ifeq ($(MOZ_BUILD_APP),mobile/android) +BASE_PATH:=@BINPATH@ +else +BASE_PATH:=@RESPATH@ +endif + +locale-manifest.in: $(GLOBAL_DEPS) FORCE + printf '\n[multilocale]\n' > $@ + printf '$(BASE_PATH)/res/multilocale.json\n' >> $@ + for LOCALE in $(MOZ_CHROME_MULTILOCALE) ;\ + do \ + printf '$(BASE_PATH)/chrome/'"$$LOCALE"'@JAREXT@\n' >> $@; \ + printf '$(BASE_PATH)/chrome/'"$$LOCALE"'.manifest\n' >> $@; \ + done + COMMA=, + #XXX: It would be nice to not duplicate en-US here, but makefile makes it hard. + echo '{"locales": [$(foreach l,$(MOZ_CHROME_MULTILOCALE),"$(l)"$(COMMA)) "en-US"]}' \ + > $(DIST)/bin/res/multilocale.json + +GARBAGE += locale-manifest.in From bfe7ce45556c742277b3e0849ceeb5eb68de378c Mon Sep 17 00:00:00 2001 From: Henrik Skupin Date: Tue, 18 Jul 2017 15:00:37 +0200 Subject: [PATCH 007/152] Bug 1368434 - Fix race condition for slow frame script registrations. r=automatedtester In some cases the registration of the frame script takes longer and as result the page load events for DOMContentLoaded and pageshow are missed. With that the current command waits forever and causes a page load timeout error after 300s. By checking the readyState of the document before the listeners are getting attached, we can ensure to return immediately if the document has already been finished loading. 
MozReview-Commit-ID: 17f6jVz7sZZ --HG-- extra : rebase_source : 1cc27fc9bd4d9b4a39607f8d44692dfc7095b2d7 --- .../functional/private_browsing/manifest.ini | 1 - .../tests/unit/test_click.py | 2 - testing/marionette/listener.js | 70 ++++++++++++++++--- 3 files changed, 60 insertions(+), 13 deletions(-) diff --git a/testing/firefox-ui/tests/functional/private_browsing/manifest.ini b/testing/firefox-ui/tests/functional/private_browsing/manifest.ini index d5c5602edcf26..34ef273653ceb 100644 --- a/testing/firefox-ui/tests/functional/private_browsing/manifest.ini +++ b/testing/firefox-ui/tests/functional/private_browsing/manifest.ini @@ -2,4 +2,3 @@ tags = local [test_about_private_browsing.py] -skip-if = true # Bug 1353599 \ No newline at end of file diff --git a/testing/marionette/harness/marionette_harness/tests/unit/test_click.py b/testing/marionette/harness/marionette_harness/tests/unit/test_click.py index 1a0a747fd5603..36eb852b2702d 100644 --- a/testing/marionette/harness/marionette_harness/tests/unit/test_click.py +++ b/testing/marionette/harness/marionette_harness/tests/unit/test_click.py @@ -9,7 +9,6 @@ from marionette_harness import ( MarionetteTestCase, run_if_e10s, - skip_if_e10s, skip_if_mobile, WindowManagerMixin, ) @@ -84,7 +83,6 @@ def setUp(self): self.marionette.delete_session() self.marionette.start_session() - @skip_if_e10s("bug 1360446") def test_click(self): self.marionette.navigate(inline(""" diff --git a/testing/marionette/listener.js b/testing/marionette/listener.js index d93e2ec70714d..8de7a66b9601f 100644 --- a/testing/marionette/listener.js +++ b/testing/marionette/listener.js @@ -174,7 +174,23 @@ var loadListener = { curContainer.frame.addEventListener("unload", this); Services.obs.addObserver(this, "outer-window-destroyed"); + } else { + // The frame script got reloaded due to a new content process. 
+ // Due to the time it takes to re-register the browser in Marionette, + // it can happen that page load events are missed before the listeners + // are getting attached again. By checking the document readyState the + // command can return immediately if the page load is already done. + let readyState = content.document.readyState; + let documentURI = content.document.documentURI; + logger.debug(`Check readyState "${readyState} for "${documentURI}"`); + + // If the page load has already finished, don't setup listeners and + // timers but return immediatelly. + if (this.handleReadyState(readyState, documentURI)) { + return; + } + addEventListener("DOMContentLoaded", loadListener); addEventListener("pageshow", loadListener); } @@ -250,43 +266,77 @@ var loadListener = { // Now wait until the target page has been loaded addEventListener("DOMContentLoaded", this, false); addEventListener("pageshow", this, false); - break; case "hashchange": this.stop(); sendOk(this.command_id); - break; case "DOMContentLoaded": - if (event.target.documentURI.startsWith("about:certerror")) { + case "pageshow": + this.handleReadyState(event.target.readyState, + event.target.documentURI); + break; + } + }, + + /** + * Checks the value of readyState for the current page + * load activity, and resolves the command if the load + * has been finished. It also takes care of the selected + * page load strategy. + * + * @param {string} readyState + * Current ready state of the document. + * @param {string} documentURI + * Current document URI of the document. + * + * @return {boolean} + * True if the page load has been finished. 
+ */ + handleReadyState(readyState, documentURI) { + let finished = false; + + switch (readyState) { + case "interactive": + if (documentURI.startsWith("about:certerror")) { this.stop(); sendError(new InsecureCertificateError(), this.command_id); + finished = true; - } else if (/about:.*(error)\?/.exec(event.target.documentURI)) { + } else if (/about:.*(error)\?/.exec(documentURI)) { this.stop(); - sendError(new UnknownError("Reached error page: " + - event.target.documentURI), this.command_id); + sendError(new UnknownError(`Reached error page: ${documentURI}`), + this.command_id); + finished = true; // Return early with a page load strategy of eager, and also // special-case about:blocked pages which should be treated as - // non-error pages but do not raise a pageshow event. + // non-error pages but do not raise a pageshow event. about:blank + // is also treaded specifically here, because it gets temporary + // loaded for new content processes, and we only want to rely on + // complete loads for it. } else if ((capabilities.get("pageLoadStrategy") === - session.PageLoadStrategy.Eager) || - /about:blocked\?/.exec(event.target.documentURI)) { + session.PageLoadStrategy.Eager && + documentURI != "about:blank") || + /about:blocked\?/.exec(documentURI)) { this.stop(); sendOk(this.command_id); + finished = true; } break; - case "pageshow": + case "complete": this.stop(); sendOk(this.command_id); + finished = true; break; } + + return finished; }, /** From 19e8dfd20b5297ea3a0ff3c72503ffcdb2ca749a Mon Sep 17 00:00:00 2001 From: Manish Goregaokar Date: Thu, 20 Jul 2017 13:36:22 -0700 Subject: [PATCH 008/152] Bug 1382190 - servo: Move FontComputationData to the end of ServoComputedValues to make size check easier, make it NonZero. 
r=emilio MozReview-Commit-ID: B2gVj5bQRBT --- layout/style/ServoTypes.h | 39 ++++++++++++++++++++++++++++++++++----- 1 file changed, 34 insertions(+), 5 deletions(-) diff --git a/layout/style/ServoTypes.h b/layout/style/ServoTypes.h index d2eb43a6b3744..a95a8231e04a7 100644 --- a/layout/style/ServoTypes.h +++ b/layout/style/ServoTypes.h @@ -136,10 +136,32 @@ struct ServoWritingMode { uint8_t mBits; }; +// Don't attempt to read from this +// (see comment on ServoFontComputationData +enum ServoKeywordSize { + Empty, // when the Option is None + XXSmall, + XSmall, + Small, + Medium, + Large, + XLarge, + XXLarge, + XXXLarge, +}; + +// Don't attempt to read from this. We can't +// always guarantee that the interior representation +// of this is correct (the mKeyword field may have a different padding), +// but the entire struct should +// have the same size and alignment as the Rust version. +// Ensure layout tests get run if touching either side. struct ServoFontComputationData { - // 8 bytes, but is done as 4+4 for alignment - uint32_t mFour; - uint32_t mFour2; +private: + ServoKeywordSize mKeyword; + float/*32_t*/ mRatio; + + static_assert(sizeof(float) == 4, "float should be 32 bit"); }; struct ServoCustomPropertiesMap { @@ -206,7 +228,7 @@ struct ServoComputedValues { const nsStyleVariables* GetStyleVariables() const; mozilla::ServoCustomPropertiesMap custom_properties; mozilla::ServoWritingMode writing_mode; - mozilla::ServoFontComputationData font_computation_data; + mozilla::ServoComputedValueFlags flags; /// The rule node representing the ordered list of rules matched for this /// node. Can be None for default values and text nodes. This is /// essentially an optimization to avoid referencing the root rule node. @@ -215,7 +237,14 @@ struct ServoComputedValues { /// relevant link for this element. A element's "relevant link" is the /// element being matched if it is a link or the nearest ancestor link. 
mozilla::ServoVisitedStyle visited_style; - mozilla::ServoComputedValueFlags flags; + + // this is the last member because most of the other members + // are pointer sized. This makes it easier to deal with the + // alignment of the fields when replacing things via bindgen + // + // This is opaque, please don't read from it from C++ + // (see comment on ServoFontComputationData) + mozilla::ServoFontComputationData font_computation_data; // C++ just sees this struct as a bucket of bits, and will // do the wrong thing if we let it use the default copy ctor/assignment From ebd633ae0a76fca443e3a162a1e5f2a40a6a662c Mon Sep 17 00:00:00 2001 From: Nico Grunbaum Date: Tue, 13 Jun 2017 17:24:54 -0700 Subject: [PATCH 009/152] Bug 1371000 - add expiration to noncamera device info;r=jib MozReview-Commit-ID: 406sbEpJfbk --HG-- extra : rebase_source : 49f4438a4fe6cbf55db5d535bd8b257c343e4d4d --- dom/media/systemservices/VideoEngine.cpp | 29 +++++++++++++++++++++--- dom/media/systemservices/VideoEngine.h | 19 +++++++++++----- 2 files changed, 39 insertions(+), 9 deletions(-) diff --git a/dom/media/systemservices/VideoEngine.cpp b/dom/media/systemservices/VideoEngine.cpp index 725e3ff670d5c..23ce824a3bffc 100644 --- a/dom/media/systemservices/VideoEngine.cpp +++ b/dom/media/systemservices/VideoEngine.cpp @@ -6,6 +6,7 @@ #include "VideoEngine.h" #include "webrtc/video_engine/browser_capture_impl.h" +#include "webrtc/system_wrappers/include/clock.h" #ifdef WEBRTC_ANDROID #include "webrtc/modules/video_capture/video_capture.h" #endif @@ -21,7 +22,6 @@ mozilla::LazyLogModule gVideoEngineLog("VideoEngine"); #define LOG_ENABLED() MOZ_LOG_TEST(gVideoEngineLog, mozilla::LogLevel::Debug) int VideoEngine::sId = 0; - #if defined(ANDROID) int VideoEngine::SetAndroidObjects(JavaVM* javaVM) { LOG((__PRETTY_FUNCTION__)); @@ -59,7 +59,7 @@ VideoEngine::CreateVideoCapture(int32_t& id, const char* deviceUniqueIdUTF8) { MOZ_ASSERT("CreateVideoCapture NO DESKTOP CAPTURE IMPL ON ANDROID" == nullptr); 
#endif } - mCaps.emplace(id,std::move(entry)); + mCaps.emplace(id, std::move(entry)); } int @@ -74,9 +74,32 @@ VideoEngine::ReleaseVideoCapture(const int32_t id) { std::shared_ptr VideoEngine::GetOrCreateVideoCaptureDeviceInfo() { + LOG((__PRETTY_FUNCTION__)); + int64_t currentTime = 0; + + const char * capDevTypeName = + webrtc::CaptureDeviceInfo(mCaptureDevInfo.type).TypeName(); + if (mDeviceInfo) { - return mDeviceInfo; + // Camera cache is invalidated by HW change detection elsewhere + if (mCaptureDevInfo.type == webrtc::CaptureDeviceType::Camera) { + LOG(("returning cached CaptureDeviceInfo of type %s", capDevTypeName)); + return mDeviceInfo; + } + // Screen sharing cache is invalidated after the expiration time + currentTime = webrtc::Clock::GetRealTimeClock()->TimeInMilliseconds(); + if (currentTime <= mExpiryTimeInMs) { + LOG(("returning cached CaptureDeviceInfo of type %s", capDevTypeName)); + return mDeviceInfo; + } + } + + if (currentTime == 0) { + currentTime = webrtc::Clock::GetRealTimeClock()->TimeInMilliseconds(); } + mExpiryTimeInMs = currentTime + kCacheExpiryPeriodMs; + LOG(("creating a new VideoCaptureDeviceInfo of type %s", capDevTypeName)); + switch (mCaptureDevInfo.type) { case webrtc::CaptureDeviceType::Camera: { mDeviceInfo.reset(webrtc::VideoCaptureFactory::CreateDeviceInfo()); diff --git a/dom/media/systemservices/VideoEngine.h b/dom/media/systemservices/VideoEngine.h index 4423e1ba1d7e2..2b5a826058f1e 100644 --- a/dom/media/systemservices/VideoEngine.h +++ b/dom/media/systemservices/VideoEngine.h @@ -26,6 +26,10 @@ class VideoEngine { private: virtual ~VideoEngine (){}; + // Base cache expiration period + // Note because cameras use HW plug event detection, this + // only applies to screen based modes. 
+ static const int64_t kCacheExpiryPeriodMs = 1000; public: VideoEngine (){}; @@ -42,11 +46,13 @@ class VideoEngine // VideoEngine is responsible for any cleanup in its modules static void Delete(VideoEngine * engine) { } - /** Returns or creates a new new DeviceInfo. - * It is cached to prevent repeated lengthy polling for "realness" - * of the hardware devices. This could be handled in a more elegant - * way in the future. - * @return on failure the shared_ptr will be null, otherwise it will contain a DeviceInfo. + /** Returns an existing or creates a new new DeviceInfo. + * Camera info is cached to prevent repeated lengthy polling for "realness" + * of the hardware devices. Other types of capture, e.g. screen share info, + * are cached for 1 second. This could be handled in a more elegant way in + * the future. + * @return on failure the shared_ptr will be null, otherwise it will contain + * a DeviceInfo. * @see bug 1305212 https://bugzilla.mozilla.org/show_bug.cgi?id=1305212 */ std::shared_ptr GetOrCreateVideoCaptureDeviceInfo(); @@ -88,7 +94,8 @@ class VideoEngine std::shared_ptr mDeviceInfo; UniquePtr mConfig; std::map mCaps; - + // The validity period for non-camera capture device infos` + int64_t mExpiryTimeInMs = 0; int32_t GenerateId(); static int32_t sId; }; From c73e53ea66f0df62d37e826be3d814a0e1a2cc0c Mon Sep 17 00:00:00 2001 From: "J. Ryan Stinnett" Date: Tue, 11 Jul 2017 13:39:22 -0500 Subject: [PATCH 010/152] Bug 1374748 - Use regular builds for Stylo via env var. 
r=catlee MozReview-Commit-ID: BCMAuSRH1sQ --HG-- extra : rebase_source : cd91e006a49fa7273cc3075c4960f6fbfc3611cc --- taskcluster/ci/build/linux.yml | 54 ------------------- taskcluster/ci/test/test-platforms.yml | 8 +-- taskcluster/taskgraph/transforms/tests.py | 17 ++++++ .../mozharness/mozilla/testing/talos.py | 14 +++++ .../mozharness/scripts/desktop_unittest.py | 11 ++++ .../mozharness/scripts/web_platform_tests.py | 8 +++ 6 files changed, 54 insertions(+), 58 deletions(-) diff --git a/taskcluster/ci/build/linux.yml b/taskcluster/ci/build/linux.yml index 44b750ee2cbd3..3cb4b221e60ff 100644 --- a/taskcluster/ci/build/linux.yml +++ b/taskcluster/ci/build/linux.yml @@ -423,60 +423,6 @@ linux64-nightly/opt: tooltool-downloads: public need-xvfb: true -linux64-stylo/opt: - description: "Linux64 Opt Stylo" - index: - product: firefox - job-name: linux64-stylo-opt - treeherder: - platform: linux64-stylo/opt - symbol: tc(B) - tier: 2 - worker-type: aws-provisioner-v1/gecko-{level}-b-linux - worker: - max-run-time: 3600 - env: - TOOLTOOL_MANIFEST: "browser/config/tooltool-manifests/linux64/releng.manifest" - run: - using: mozharness - actions: [get-secrets build check-test update] - config: - - builds/releng_base_linux_64_builds.py - - balrog/production.py - script: "mozharness/scripts/fx_desktop_build.py" - secrets: true - custom-build-variant-cfg: stylo - tooltool-downloads: public - need-xvfb: true - run-on-projects: [ 'trunk', 'try' ] - -linux64-stylo/debug: - description: "Linux64 Debug Stylo" - index: - product: firefox - job-name: linux64-stylo-debug - treeherder: - platform: linux64-stylo/debug - symbol: tc(B) - tier: 2 - worker-type: aws-provisioner-v1/gecko-{level}-b-linux - worker: - max-run-time: 3600 - env: - TOOLTOOL_MANIFEST: "browser/config/tooltool-manifests/linux64/releng.manifest" - run: - using: mozharness - actions: [get-secrets build check-test update] - config: - - builds/releng_base_linux_64_builds.py - - balrog/production.py - script: 
"mozharness/scripts/fx_desktop_build.py" - secrets: true - custom-build-variant-cfg: stylo-debug - tooltool-downloads: public - need-xvfb: true - run-on-projects: [ 'trunk', 'try' ] - linux64-noopt/debug: description: "Linux64 No-optimize Debug" index: diff --git a/taskcluster/ci/test/test-platforms.yml b/taskcluster/ci/test/test-platforms.yml index 15a1cbcf33c28..2ab9327e9406f 100644 --- a/taskcluster/ci/test/test-platforms.yml +++ b/taskcluster/ci/test/test-platforms.yml @@ -87,20 +87,20 @@ linux64-asan/opt: # Stylo builds only run a subset of tests for the moment. So give them # their own test set. linux64-stylo/debug: - build-platform: linux64-stylo/debug + build-platform: linux64/debug test-sets: - stylo-tests linux64-stylo/opt: - build-platform: linux64-stylo/opt + build-platform: linux64/opt test-sets: - stylo-tests - talos linux64-stylo-sequential/debug: - build-platform: linux64-stylo/debug + build-platform: linux64/debug test-sets: - stylo-sequential-tests linux64-stylo-sequential/opt: - build-platform: linux64-stylo/opt + build-platform: linux64/opt test-sets: - stylo-sequential-tests - talos diff --git a/taskcluster/taskgraph/transforms/tests.py b/taskcluster/taskgraph/transforms/tests.py index 10ce441dfec7a..af104dff8c3cc 100644 --- a/taskcluster/taskgraph/transforms/tests.py +++ b/taskcluster/taskgraph/transforms/tests.py @@ -698,6 +698,23 @@ def set_test_type(config, tests): yield test +@transforms.add +def enable_stylo(config, tests): + """ + Force Stylo on for all its tests, except Stylo vs. Gecko reftests where the + test harness will handle this. 
+ """ + for test in tests: + if '-stylo' not in test['test-platform']: + yield test + continue + + if 'reftest-stylo' not in test['suite']: + test['mozharness'].setdefault('extra-options', []).append('--enable-stylo') + + yield test + + @transforms.add def parallel_stylo_tests(config, tests): """Ensure that any stylo tests running with e10s enabled also test diff --git a/testing/mozharness/mozharness/mozilla/testing/talos.py b/testing/mozharness/mozharness/mozilla/testing/talos.py index f7eeeac0608ae..7b313372b4c6f 100755 --- a/testing/mozharness/mozharness/mozilla/testing/talos.py +++ b/testing/mozharness/mozharness/mozilla/testing/talos.py @@ -142,6 +142,12 @@ class Talos(TestingMixin, MercurialScript, BlobUploadMixin, TooltoolMixin, "default": 0, "help": "The interval between samples taken by the profiler (milliseconds)" }], + [["--enable-stylo"], { + "action": "store_true", + "dest": "enable_stylo", + "default": False, + "help": "Run tests with Stylo enabled" + }], ] + testing_config_options + copy.deepcopy(blobupload_config_options) \ + copy.deepcopy(code_coverage_config_options) @@ -557,6 +563,14 @@ def run_tests(self, args=None, **kw): if self.obj_path is not None: env['MOZ_DEVELOPER_OBJ_DIR'] = self.obj_path + if self.config['enable_stylo']: + env['STYLO_FORCE_ENABLED'] = '1' + # Remove once Talos is migrated away from buildbot + if self.buildbot_config: + platform = self.buildbot_config.get('properties', {}).get('platform', '') + if 'stylo' in platform: + env['STYLO_FORCE_ENABLED'] = '1' + # sets a timeout for how long talos should run without output output_timeout = self.config.get('talos_output_timeout', 3600) # run talos tests diff --git a/testing/mozharness/scripts/desktop_unittest.py b/testing/mozharness/scripts/desktop_unittest.py index 29a6e5a9a24d6..a778722550bb1 100755 --- a/testing/mozharness/scripts/desktop_unittest.py +++ b/testing/mozharness/scripts/desktop_unittest.py @@ -150,6 +150,12 @@ class DesktopUnittest(TestingMixin, MercurialScript, 
BlobUploadMixin, MozbaseMix "default": False, "help": "Forcibly enable parallel traversal in Stylo with STYLO_THREADS=4"} ], + [["--enable-stylo"], { + "action": "store_true", + "dest": "enable_stylo", + "default": False, + "help": "Run tests with Stylo enabled"} + ], [["--enable-webrender"], { "action": "store_true", "dest": "enable_webrender", @@ -216,6 +222,9 @@ def __init__(self, require_config_file=True): if c['e10s']: perfherder_options.append('e10s') + if c['enable_stylo']: + perfherder_options.append('stylo') + self.resource_monitor_perfherder_id = ('.'.join(perfherder_parts), perfherder_options) @@ -709,6 +718,8 @@ def _run_category_suites(self, suite_category): env['MOZ_WEBRENDER'] = '1' env['STYLO_THREADS'] = '4' if self.config['parallel_stylo_traversal'] else '1' + if self.config['enable_stylo']: + env['STYLO_FORCE_ENABLED'] = '1' env = self.query_env(partial_env=env, log_level=INFO) cmd_timeout = self.get_timeout_for_category(suite_category) diff --git a/testing/mozharness/scripts/web_platform_tests.py b/testing/mozharness/scripts/web_platform_tests.py index e05785535b8eb..5af365e0fae1a 100755 --- a/testing/mozharness/scripts/web_platform_tests.py +++ b/testing/mozharness/scripts/web_platform_tests.py @@ -68,6 +68,12 @@ class WebPlatformTest(TestingMixin, MercurialScript, BlobUploadMixin, CodeCovera "default": False, "help": "Forcibly enable parallel traversal in Stylo with STYLO_THREADS=4"} ], + [["--enable-stylo"], { + "action": "store_true", + "dest": "enable_stylo", + "default": False, + "help": "Run tests with Stylo enabled"} + ], ] + copy.deepcopy(testing_config_options) + \ copy.deepcopy(blobupload_config_options) + \ copy.deepcopy(code_coverage_config_options) @@ -254,6 +260,8 @@ def run_tests(self): env['MOZ_WEBRENDER'] = '1' env['STYLO_THREADS'] = '4' if self.config['parallel_stylo_traversal'] else '1' + if self.config['enable_stylo']: + env['STYLO_FORCE_ENABLED'] = '1' env = self.query_env(partial_env=env, log_level=INFO) From 
5c71ab391cdb3c7bc19ad3c5e951df956ffbf5b3 Mon Sep 17 00:00:00 2001 From: Andrew Halberstadt Date: Wed, 19 Jul 2017 16:50:57 -0400 Subject: [PATCH 011/152] Bug 1382362 - Update pytest to v3.1.3 and py to v1.4.34, r=davehunt This patch was generated by something similar to: $ cd third_party/python $ hg rm pytest/* py/* $ pip wheel pytest $ unzip pytest.whl $ unzip py.whl $ hg add pytest/* py/* MozReview-Commit-ID: 3LKVrbKfMgK --HG-- extra : rebase_source : 4204340a78501a8e44e83dbf9cae63a7e91541ef --- third_party/python/py/AUTHORS | 24 - third_party/python/py/LICENSE | 19 - third_party/python/py/MANIFEST.in | 9 - third_party/python/py/PKG-INFO | 46 - third_party/python/py/README.txt | 21 - third_party/python/py/py/__init__.py | 302 +- third_party/python/py/py/__metainfo.py | 4 +- third_party/python/py/py/_apipkg.py | 362 +-- third_party/python/py/py/_builtin.py | 496 ++-- third_party/python/py/py/_code/__init__.py | 2 +- .../python/py/py/_code/_assertionnew.py | 678 ++--- .../python/py/py/_code/_assertionold.py | 1110 ++++---- .../python/py/py/_code/_py2traceback.py | 158 +- third_party/python/py/py/_code/assertion.py | 188 +- third_party/python/py/py/_code/code.py | 1574 +++++------ third_party/python/py/py/_code/source.py | 830 +++--- third_party/python/py/py/_error.py | 177 +- third_party/python/py/py/_iniconfig.py | 324 +-- third_party/python/py/py/_io/__init__.py | 2 +- third_party/python/py/py/_io/capture.py | 742 ++--- third_party/python/py/py/_io/saferepr.py | 142 +- .../python/py/py/_io/terminalwriter.py | 705 ++--- third_party/python/py/py/_log/__init__.py | 4 +- third_party/python/py/py/_log/log.py | 372 +-- third_party/python/py/py/_log/warning.py | 152 +- third_party/python/py/py/_path/__init__.py | 2 +- third_party/python/py/py/_path/cacheutil.py | 228 +- third_party/python/py/py/_path/common.py | 848 +++--- third_party/python/py/py/_path/local.py | 1841 ++++++------ third_party/python/py/py/_path/svnurl.py | 760 ++--- 
third_party/python/py/py/_path/svnwc.py | 2480 ++++++++--------- third_party/python/py/py/_process/__init__.py | 2 +- third_party/python/py/py/_process/cmdexec.py | 98 +- .../python/py/py/_process/forkedfunc.py | 240 +- third_party/python/py/py/_process/killproc.py | 46 +- third_party/python/py/py/_std.py | 36 +- third_party/python/py/py/_xmlgen.py | 508 ++-- third_party/python/py/py/test.py | 20 +- third_party/python/py/setup.cfg | 11 - third_party/python/py/setup.py | 38 - third_party/python/pytest/.coveragerc | 7 - third_party/python/pytest/AUTHORS | 91 - third_party/python/pytest/LICENSE | 21 - third_party/python/pytest/MANIFEST.in | 34 - third_party/python/pytest/PKG-INFO | 133 - third_party/python/pytest/README.rst | 102 - third_party/python/pytest/_pytest/__init__.py | 10 +- .../python/pytest/_pytest/_argcomplete.py | 3 +- .../python/pytest/_pytest/_code/__init__.py | 4 +- .../pytest/_pytest/_code/_py2traceback.py | 1 + .../python/pytest/_pytest/_code/code.py | 228 +- .../python/pytest/_pytest/_code/source.py | 25 +- third_party/python/pytest/_pytest/_pluggy.py | 2 +- third_party/python/pytest/_pytest/_version.py | 4 + .../pytest/_pytest/assertion/__init__.py | 165 +- .../pytest/_pytest/assertion/reinterpret.py | 407 --- .../pytest/_pytest/assertion/rewrite.py | 120 +- .../pytest/_pytest/assertion/truncate.py | 102 + .../python/pytest/_pytest/assertion/util.py | 47 +- .../python/pytest/_pytest/cacheprovider.py | 14 +- third_party/python/pytest/_pytest/capture.py | 102 +- third_party/python/pytest/_pytest/compat.py | 307 ++ third_party/python/pytest/_pytest/config.py | 407 ++- .../pytest/_pytest/{pdb.py => debugging.py} | 46 +- .../python/pytest/_pytest/deprecated.py | 24 + third_party/python/pytest/_pytest/doctest.py | 104 +- third_party/python/pytest/_pytest/fixtures.py | 1129 ++++++++ .../python/pytest/_pytest/freeze_support.py | 44 + .../python/pytest/_pytest/genscript.py | 132 - .../python/pytest/_pytest/helpconfig.py | 52 +- 
third_party/python/pytest/_pytest/hookspec.py | 98 +- third_party/python/pytest/_pytest/junitxml.py | 107 +- third_party/python/pytest/_pytest/main.py | 260 +- third_party/python/pytest/_pytest/mark.py | 162 +- .../python/pytest/_pytest/monkeypatch.py | 27 +- third_party/python/pytest/_pytest/nose.py | 19 +- third_party/python/pytest/_pytest/pastebin.py | 8 + third_party/python/pytest/_pytest/pytester.py | 153 +- third_party/python/pytest/_pytest/python.py | 2039 +++++--------- third_party/python/pytest/_pytest/recwarn.py | 137 +- .../python/pytest/_pytest/resultlog.py | 10 +- third_party/python/pytest/_pytest/runner.py | 109 +- .../python/pytest/_pytest/setuponly.py | 74 + .../python/pytest/_pytest/setupplan.py | 25 + third_party/python/pytest/_pytest/skipping.py | 109 +- .../pytest/_pytest/standalonetemplate.py | 89 - third_party/python/pytest/_pytest/terminal.py | 120 +- third_party/python/pytest/_pytest/tmpdir.py | 11 +- third_party/python/pytest/_pytest/unittest.py | 84 +- .../_pytest/vendored_packages/README.md | 13 - .../pluggy-0.3.1.dist-info/DESCRIPTION.rst | 10 - .../pluggy-0.3.1.dist-info/METADATA | 39 - .../pluggy-0.3.1.dist-info/RECORD | 8 - .../pluggy-0.3.1.dist-info/WHEEL | 6 - .../pluggy-0.3.1.dist-info/metadata.json | 1 - .../pluggy-0.3.1.dist-info/pbr.json | 1 - .../pluggy-0.3.1.dist-info/top_level.txt | 1 - .../_pytest/vendored_packages/pluggy.py | 43 +- third_party/python/pytest/_pytest/warnings.py | 88 + third_party/python/pytest/pytest.py | 72 +- third_party/python/pytest/setup.cfg | 19 - third_party/python/pytest/setup.py | 122 - 102 files changed, 12145 insertions(+), 11387 deletions(-) delete mode 100644 third_party/python/py/AUTHORS delete mode 100644 third_party/python/py/LICENSE delete mode 100644 third_party/python/py/MANIFEST.in delete mode 100644 third_party/python/py/PKG-INFO delete mode 100644 third_party/python/py/README.txt delete mode 100644 third_party/python/py/setup.cfg delete mode 100644 third_party/python/py/setup.py delete 
mode 100644 third_party/python/pytest/.coveragerc delete mode 100644 third_party/python/pytest/AUTHORS delete mode 100644 third_party/python/pytest/LICENSE delete mode 100644 third_party/python/pytest/MANIFEST.in delete mode 100644 third_party/python/pytest/PKG-INFO delete mode 100644 third_party/python/pytest/README.rst create mode 100644 third_party/python/pytest/_pytest/_version.py delete mode 100644 third_party/python/pytest/_pytest/assertion/reinterpret.py create mode 100644 third_party/python/pytest/_pytest/assertion/truncate.py mode change 100755 => 100644 third_party/python/pytest/_pytest/cacheprovider.py create mode 100644 third_party/python/pytest/_pytest/compat.py rename third_party/python/pytest/_pytest/{pdb.py => debugging.py} (69%) create mode 100644 third_party/python/pytest/_pytest/deprecated.py create mode 100644 third_party/python/pytest/_pytest/fixtures.py create mode 100644 third_party/python/pytest/_pytest/freeze_support.py delete mode 100755 third_party/python/pytest/_pytest/genscript.py create mode 100644 third_party/python/pytest/_pytest/setuponly.py create mode 100644 third_party/python/pytest/_pytest/setupplan.py delete mode 100755 third_party/python/pytest/_pytest/standalonetemplate.py delete mode 100644 third_party/python/pytest/_pytest/vendored_packages/README.md delete mode 100644 third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/DESCRIPTION.rst delete mode 100644 third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/METADATA delete mode 100644 third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/RECORD delete mode 100644 third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/WHEEL delete mode 100644 third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/metadata.json delete mode 100644 third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/pbr.json delete mode 100644 
third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/top_level.txt create mode 100644 third_party/python/pytest/_pytest/warnings.py delete mode 100644 third_party/python/pytest/setup.cfg delete mode 100644 third_party/python/pytest/setup.py diff --git a/third_party/python/py/AUTHORS b/third_party/python/py/AUTHORS deleted file mode 100644 index 8c0cf9b71b15d..0000000000000 --- a/third_party/python/py/AUTHORS +++ /dev/null @@ -1,24 +0,0 @@ -Holger Krekel, holger at merlinux eu -Benjamin Peterson, benjamin at python org -Ronny Pfannschmidt, Ronny.Pfannschmidt at gmx de -Guido Wesdorp, johnny at johnnydebris net -Samuele Pedroni, pedronis at openend se -Carl Friedrich Bolz, cfbolz at gmx de -Armin Rigo, arigo at tunes org -Maciek Fijalkowski, fijal at genesilico pl -Brian Dorsey, briandorsey at gmail com -Floris Bruynooghe, flub at devork be -merlinux GmbH, Germany, office at merlinux eu - -Contributors include:: - -Ross Lawley -Ralf Schmitt -Chris Lamb -Harald Armin Massa -Martijn Faassen -Ian Bicking -Jan Balster -Grig Gheorghiu -Bob Ippolito -Christian Tismer diff --git a/third_party/python/py/LICENSE b/third_party/python/py/LICENSE deleted file mode 100644 index 31ecdfb1dbc54..0000000000000 --- a/third_party/python/py/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. 
- - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. - diff --git a/third_party/python/py/MANIFEST.in b/third_party/python/py/MANIFEST.in deleted file mode 100644 index 31fb010b4879b..0000000000000 --- a/third_party/python/py/MANIFEST.in +++ /dev/null @@ -1,9 +0,0 @@ -include CHANGELOG -include AUTHORS -include README.txt -include setup.py -include LICENSE -include conftest.py -include tox.ini -graft doc -graft testing diff --git a/third_party/python/py/PKG-INFO b/third_party/python/py/PKG-INFO deleted file mode 100644 index 30b14ae887337..0000000000000 --- a/third_party/python/py/PKG-INFO +++ /dev/null @@ -1,46 +0,0 @@ -Metadata-Version: 1.1 -Name: py -Version: 1.4.31 -Summary: library with cross-python path, ini-parsing, io, code, log facilities -Home-page: http://pylib.readthedocs.org/ -Author: holger krekel, Ronny Pfannschmidt, Benjamin Peterson and others -Author-email: pytest-dev@python.org -License: MIT license -Description: .. image:: https://drone.io/bitbucket.org/pytest-dev/py/status.png - :target: https://drone.io/bitbucket.org/pytest-dev/py/latest - .. 
image:: https://pypip.in/v/py/badge.png - :target: https://pypi.python.org/pypi/py - - The py lib is a Python development support library featuring - the following tools and modules: - - * py.path: uniform local and svn path objects - * py.apipkg: explicit API control and lazy-importing - * py.iniconfig: easy parsing of .ini files - * py.code: dynamic code generation and introspection - - NOTE: prior to the 1.4 release this distribution used to - contain py.test which is now its own package, see http://pytest.org - - For questions and more information please visit http://pylib.readthedocs.org - - Bugs and issues: http://bitbucket.org/pytest-dev/py/issues/ - - Authors: Holger Krekel and others, 2004-2015 - -Platform: unix -Platform: linux -Platform: osx -Platform: cygwin -Platform: win32 -Classifier: Development Status :: 6 - Mature -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: MIT License -Classifier: Operating System :: POSIX -Classifier: Operating System :: Microsoft :: Windows -Classifier: Operating System :: MacOS :: MacOS X -Classifier: Topic :: Software Development :: Testing -Classifier: Topic :: Software Development :: Libraries -Classifier: Topic :: Utilities -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 3 diff --git a/third_party/python/py/README.txt b/third_party/python/py/README.txt deleted file mode 100644 index e327e93737320..0000000000000 --- a/third_party/python/py/README.txt +++ /dev/null @@ -1,21 +0,0 @@ -.. image:: https://drone.io/bitbucket.org/pytest-dev/py/status.png - :target: https://drone.io/bitbucket.org/pytest-dev/py/latest -.. 
image:: https://pypip.in/v/py/badge.png - :target: https://pypi.python.org/pypi/py - -The py lib is a Python development support library featuring -the following tools and modules: - -* py.path: uniform local and svn path objects -* py.apipkg: explicit API control and lazy-importing -* py.iniconfig: easy parsing of .ini files -* py.code: dynamic code generation and introspection - -NOTE: prior to the 1.4 release this distribution used to -contain py.test which is now its own package, see http://pytest.org - -For questions and more information please visit http://pylib.readthedocs.org - -Bugs and issues: http://bitbucket.org/pytest-dev/py/issues/ - -Authors: Holger Krekel and others, 2004-2015 diff --git a/third_party/python/py/py/__init__.py b/third_party/python/py/py/__init__.py index bdb9aa2181f66..4a2b6fa89bb87 100644 --- a/third_party/python/py/py/__init__.py +++ b/third_party/python/py/py/__init__.py @@ -1,150 +1,152 @@ -""" -py.test and pylib: rapid testing and development utils - -this module uses apipkg.py for lazy-loading sub modules -and classes. The initpkg-dictionary below specifies -name->value mappings where value can be another namespace -dictionary or an import path. 
- -(c) Holger Krekel and others, 2004-2014 -""" -__version__ = '1.4.31' - -from py import _apipkg - -# so that py.error.* instances are picklable -import sys -sys.modules['py.error'] = _apipkg.AliasModule("py.error", "py._error", 'error') - -_apipkg.initpkg(__name__, attr={'_apipkg': _apipkg}, exportdefs={ - # access to all standard lib modules - 'std': '._std:std', - # access to all posix errno's as classes - 'error': '._error:error', - - '_pydir' : '.__metainfo:pydir', - 'version': 'py:__version__', # backward compatibility - - # pytest-2.0 has a flat namespace, we use alias modules - # to keep old references compatible - 'test' : 'pytest', - 'test.collect' : 'pytest', - 'test.cmdline' : 'pytest', - - # hook into the top-level standard library - 'process' : { - '__doc__' : '._process:__doc__', - 'cmdexec' : '._process.cmdexec:cmdexec', - 'kill' : '._process.killproc:kill', - 'ForkedFunc' : '._process.forkedfunc:ForkedFunc', - }, - - 'apipkg' : { - 'initpkg' : '._apipkg:initpkg', - 'ApiModule' : '._apipkg:ApiModule', - }, - - 'iniconfig' : { - 'IniConfig' : '._iniconfig:IniConfig', - 'ParseError' : '._iniconfig:ParseError', - }, - - 'path' : { - '__doc__' : '._path:__doc__', - 'svnwc' : '._path.svnwc:SvnWCCommandPath', - 'svnurl' : '._path.svnurl:SvnCommandPath', - 'local' : '._path.local:LocalPath', - 'SvnAuth' : '._path.svnwc:SvnAuth', - }, - - # python inspection/code-generation API - 'code' : { - '__doc__' : '._code:__doc__', - 'compile' : '._code.source:compile_', - 'Source' : '._code.source:Source', - 'Code' : '._code.code:Code', - 'Frame' : '._code.code:Frame', - 'ExceptionInfo' : '._code.code:ExceptionInfo', - 'Traceback' : '._code.code:Traceback', - 'getfslineno' : '._code.source:getfslineno', - 'getrawcode' : '._code.code:getrawcode', - 'patch_builtins' : '._code.code:patch_builtins', - 'unpatch_builtins' : '._code.code:unpatch_builtins', - '_AssertionError' : '._code.assertion:AssertionError', - '_reinterpret_old' : '._code.assertion:reinterpret_old', - 
'_reinterpret' : '._code.assertion:reinterpret', - '_reprcompare' : '._code.assertion:_reprcompare', - '_format_explanation' : '._code.assertion:_format_explanation', - }, - - # backports and additions of builtins - 'builtin' : { - '__doc__' : '._builtin:__doc__', - 'enumerate' : '._builtin:enumerate', - 'reversed' : '._builtin:reversed', - 'sorted' : '._builtin:sorted', - 'any' : '._builtin:any', - 'all' : '._builtin:all', - 'set' : '._builtin:set', - 'frozenset' : '._builtin:frozenset', - 'BaseException' : '._builtin:BaseException', - 'GeneratorExit' : '._builtin:GeneratorExit', - '_sysex' : '._builtin:_sysex', - 'print_' : '._builtin:print_', - '_reraise' : '._builtin:_reraise', - '_tryimport' : '._builtin:_tryimport', - 'exec_' : '._builtin:exec_', - '_basestring' : '._builtin:_basestring', - '_totext' : '._builtin:_totext', - '_isbytes' : '._builtin:_isbytes', - '_istext' : '._builtin:_istext', - '_getimself' : '._builtin:_getimself', - '_getfuncdict' : '._builtin:_getfuncdict', - '_getcode' : '._builtin:_getcode', - 'builtins' : '._builtin:builtins', - 'execfile' : '._builtin:execfile', - 'callable' : '._builtin:callable', - 'bytes' : '._builtin:bytes', - 'text' : '._builtin:text', - }, - - # input-output helping - 'io' : { - '__doc__' : '._io:__doc__', - 'dupfile' : '._io.capture:dupfile', - 'TextIO' : '._io.capture:TextIO', - 'BytesIO' : '._io.capture:BytesIO', - 'FDCapture' : '._io.capture:FDCapture', - 'StdCapture' : '._io.capture:StdCapture', - 'StdCaptureFD' : '._io.capture:StdCaptureFD', - 'TerminalWriter' : '._io.terminalwriter:TerminalWriter', - 'ansi_print' : '._io.terminalwriter:ansi_print', - 'get_terminal_width' : '._io.terminalwriter:get_terminal_width', - 'saferepr' : '._io.saferepr:saferepr', - }, - - # small and mean xml/html generation - 'xml' : { - '__doc__' : '._xmlgen:__doc__', - 'html' : '._xmlgen:html', - 'Tag' : '._xmlgen:Tag', - 'raw' : '._xmlgen:raw', - 'Namespace' : '._xmlgen:Namespace', - 'escape' : '._xmlgen:escape', - }, - - 
'log' : { - # logging API ('producers' and 'consumers' connected via keywords) - '__doc__' : '._log:__doc__', - '_apiwarn' : '._log.warning:_apiwarn', - 'Producer' : '._log.log:Producer', - 'setconsumer' : '._log.log:setconsumer', - '_setstate' : '._log.log:setstate', - '_getstate' : '._log.log:getstate', - 'Path' : '._log.log:Path', - 'STDOUT' : '._log.log:STDOUT', - 'STDERR' : '._log.log:STDERR', - 'Syslog' : '._log.log:Syslog', - }, - -}) +""" +pylib: rapid testing and development utils + +this module uses apipkg.py for lazy-loading sub modules +and classes. The initpkg-dictionary below specifies +name->value mappings where value can be another namespace +dictionary or an import path. + +(c) Holger Krekel and others, 2004-2014 +""" +__version__ = '1.4.34' + +from py import _apipkg + +# so that py.error.* instances are picklable +import sys +sys.modules['py.error'] = _apipkg.AliasModule("py.error", "py._error", 'error') +import py.error # "Dereference" it now just to be safe (issue110) + + +_apipkg.initpkg(__name__, attr={'_apipkg': _apipkg}, exportdefs={ + # access to all standard lib modules + 'std': '._std:std', + # access to all posix errno's as classes + 'error': '._error:error', + + '_pydir' : '.__metainfo:pydir', + 'version': 'py:__version__', # backward compatibility + + # pytest-2.0 has a flat namespace, we use alias modules + # to keep old references compatible + 'test' : 'pytest', + 'test.collect' : 'pytest', + 'test.cmdline' : 'pytest', + + # hook into the top-level standard library + 'process' : { + '__doc__' : '._process:__doc__', + 'cmdexec' : '._process.cmdexec:cmdexec', + 'kill' : '._process.killproc:kill', + 'ForkedFunc' : '._process.forkedfunc:ForkedFunc', + }, + + 'apipkg' : { + 'initpkg' : '._apipkg:initpkg', + 'ApiModule' : '._apipkg:ApiModule', + }, + + 'iniconfig' : { + 'IniConfig' : '._iniconfig:IniConfig', + 'ParseError' : '._iniconfig:ParseError', + }, + + 'path' : { + '__doc__' : '._path:__doc__', + 'svnwc' : 
'._path.svnwc:SvnWCCommandPath', + 'svnurl' : '._path.svnurl:SvnCommandPath', + 'local' : '._path.local:LocalPath', + 'SvnAuth' : '._path.svnwc:SvnAuth', + }, + + # python inspection/code-generation API + 'code' : { + '__doc__' : '._code:__doc__', + 'compile' : '._code.source:compile_', + 'Source' : '._code.source:Source', + 'Code' : '._code.code:Code', + 'Frame' : '._code.code:Frame', + 'ExceptionInfo' : '._code.code:ExceptionInfo', + 'Traceback' : '._code.code:Traceback', + 'getfslineno' : '._code.source:getfslineno', + 'getrawcode' : '._code.code:getrawcode', + 'patch_builtins' : '._code.code:patch_builtins', + 'unpatch_builtins' : '._code.code:unpatch_builtins', + '_AssertionError' : '._code.assertion:AssertionError', + '_reinterpret_old' : '._code.assertion:reinterpret_old', + '_reinterpret' : '._code.assertion:reinterpret', + '_reprcompare' : '._code.assertion:_reprcompare', + '_format_explanation' : '._code.assertion:_format_explanation', + }, + + # backports and additions of builtins + 'builtin' : { + '__doc__' : '._builtin:__doc__', + 'enumerate' : '._builtin:enumerate', + 'reversed' : '._builtin:reversed', + 'sorted' : '._builtin:sorted', + 'any' : '._builtin:any', + 'all' : '._builtin:all', + 'set' : '._builtin:set', + 'frozenset' : '._builtin:frozenset', + 'BaseException' : '._builtin:BaseException', + 'GeneratorExit' : '._builtin:GeneratorExit', + '_sysex' : '._builtin:_sysex', + 'print_' : '._builtin:print_', + '_reraise' : '._builtin:_reraise', + '_tryimport' : '._builtin:_tryimport', + 'exec_' : '._builtin:exec_', + '_basestring' : '._builtin:_basestring', + '_totext' : '._builtin:_totext', + '_isbytes' : '._builtin:_isbytes', + '_istext' : '._builtin:_istext', + '_getimself' : '._builtin:_getimself', + '_getfuncdict' : '._builtin:_getfuncdict', + '_getcode' : '._builtin:_getcode', + 'builtins' : '._builtin:builtins', + 'execfile' : '._builtin:execfile', + 'callable' : '._builtin:callable', + 'bytes' : '._builtin:bytes', + 'text' : '._builtin:text', 
+ }, + + # input-output helping + 'io' : { + '__doc__' : '._io:__doc__', + 'dupfile' : '._io.capture:dupfile', + 'TextIO' : '._io.capture:TextIO', + 'BytesIO' : '._io.capture:BytesIO', + 'FDCapture' : '._io.capture:FDCapture', + 'StdCapture' : '._io.capture:StdCapture', + 'StdCaptureFD' : '._io.capture:StdCaptureFD', + 'TerminalWriter' : '._io.terminalwriter:TerminalWriter', + 'ansi_print' : '._io.terminalwriter:ansi_print', + 'get_terminal_width' : '._io.terminalwriter:get_terminal_width', + 'saferepr' : '._io.saferepr:saferepr', + }, + + # small and mean xml/html generation + 'xml' : { + '__doc__' : '._xmlgen:__doc__', + 'html' : '._xmlgen:html', + 'Tag' : '._xmlgen:Tag', + 'raw' : '._xmlgen:raw', + 'Namespace' : '._xmlgen:Namespace', + 'escape' : '._xmlgen:escape', + }, + + 'log' : { + # logging API ('producers' and 'consumers' connected via keywords) + '__doc__' : '._log:__doc__', + '_apiwarn' : '._log.warning:_apiwarn', + 'Producer' : '._log.log:Producer', + 'setconsumer' : '._log.log:setconsumer', + '_setstate' : '._log.log:setstate', + '_getstate' : '._log.log:getstate', + 'Path' : '._log.log:Path', + 'STDOUT' : '._log.log:STDOUT', + 'STDERR' : '._log.log:STDERR', + 'Syslog' : '._log.log:Syslog', + }, + +}) diff --git a/third_party/python/py/py/__metainfo.py b/third_party/python/py/py/__metainfo.py index 12581eb7afbc2..067806c23dac5 100644 --- a/third_party/python/py/py/__metainfo.py +++ b/third_party/python/py/py/__metainfo.py @@ -1,2 +1,2 @@ -import py -pydir = py.path.local(py.__file__).dirpath() +import py +pydir = py.path.local(py.__file__).dirpath() diff --git a/third_party/python/py/py/_apipkg.py b/third_party/python/py/py/_apipkg.py index a73b8f6d0bc61..42bd29b1fc87d 100644 --- a/third_party/python/py/py/_apipkg.py +++ b/third_party/python/py/py/_apipkg.py @@ -1,181 +1,181 @@ -""" -apipkg: control the exported namespace of a python package. 
- -see http://pypi.python.org/pypi/apipkg - -(c) holger krekel, 2009 - MIT license -""" -import os -import sys -from types import ModuleType - -__version__ = '1.3.dev' - -def _py_abspath(path): - """ - special version of abspath - that will leave paths from jython jars alone - """ - if path.startswith('__pyclasspath__'): - - return path - else: - return os.path.abspath(path) - -def initpkg(pkgname, exportdefs, attr=dict()): - """ initialize given package from the export definitions. """ - oldmod = sys.modules.get(pkgname) - d = {} - f = getattr(oldmod, '__file__', None) - if f: - f = _py_abspath(f) - d['__file__'] = f - if hasattr(oldmod, '__version__'): - d['__version__'] = oldmod.__version__ - if hasattr(oldmod, '__loader__'): - d['__loader__'] = oldmod.__loader__ - if hasattr(oldmod, '__path__'): - d['__path__'] = [_py_abspath(p) for p in oldmod.__path__] - if '__doc__' not in exportdefs and getattr(oldmod, '__doc__', None): - d['__doc__'] = oldmod.__doc__ - d.update(attr) - if hasattr(oldmod, "__dict__"): - oldmod.__dict__.update(d) - mod = ApiModule(pkgname, exportdefs, implprefix=pkgname, attr=d) - sys.modules[pkgname] = mod - -def importobj(modpath, attrname): - module = __import__(modpath, None, None, ['__doc__']) - if not attrname: - return module - - retval = module - names = attrname.split(".") - for x in names: - retval = getattr(retval, x) - return retval - -class ApiModule(ModuleType): - def __docget(self): - try: - return self.__doc - except AttributeError: - if '__doc__' in self.__map__: - return self.__makeattr('__doc__') - def __docset(self, value): - self.__doc = value - __doc__ = property(__docget, __docset) - - def __init__(self, name, importspec, implprefix=None, attr=None): - self.__name__ = name - self.__all__ = [x for x in importspec if x != '__onfirstaccess__'] - self.__map__ = {} - self.__implprefix__ = implprefix or name - if attr: - for name, val in attr.items(): - # print "setting", self.__name__, name, val - setattr(self, name, val) - 
for name, importspec in importspec.items(): - if isinstance(importspec, dict): - subname = '%s.%s' % (self.__name__, name) - apimod = ApiModule(subname, importspec, implprefix) - sys.modules[subname] = apimod - setattr(self, name, apimod) - else: - parts = importspec.split(':') - modpath = parts.pop(0) - attrname = parts and parts[0] or "" - if modpath[0] == '.': - modpath = implprefix + modpath - - if not attrname: - subname = '%s.%s' % (self.__name__, name) - apimod = AliasModule(subname, modpath) - sys.modules[subname] = apimod - if '.' not in name: - setattr(self, name, apimod) - else: - self.__map__[name] = (modpath, attrname) - - def __repr__(self): - l = [] - if hasattr(self, '__version__'): - l.append("version=" + repr(self.__version__)) - if hasattr(self, '__file__'): - l.append('from ' + repr(self.__file__)) - if l: - return '' % (self.__name__, " ".join(l)) - return '' % (self.__name__,) - - def __makeattr(self, name): - """lazily compute value for name or raise AttributeError if unknown.""" - # print "makeattr", self.__name__, name - target = None - if '__onfirstaccess__' in self.__map__: - target = self.__map__.pop('__onfirstaccess__') - importobj(*target)() - try: - modpath, attrname = self.__map__[name] - except KeyError: - if target is not None and name != '__onfirstaccess__': - # retry, onfirstaccess might have set attrs - return getattr(self, name) - raise AttributeError(name) - else: - result = importobj(modpath, attrname) - setattr(self, name, result) - try: - del self.__map__[name] - except KeyError: - pass # in a recursive-import situation a double-del can happen - return result - - __getattr__ = __makeattr - - def __dict__(self): - # force all the content of the module to be loaded when __dict__ is read - dictdescr = ModuleType.__dict__['__dict__'] - dict = dictdescr.__get__(self) - if dict is not None: - hasattr(self, 'some') - for name in self.__all__: - try: - self.__makeattr(name) - except AttributeError: - pass - return dict - __dict__ = 
property(__dict__) - - -def AliasModule(modname, modpath, attrname=None): - mod = [] - - def getmod(): - if not mod: - x = importobj(modpath, None) - if attrname is not None: - x = getattr(x, attrname) - mod.append(x) - return mod[0] - - class AliasModule(ModuleType): - - def __repr__(self): - x = modpath - if attrname: - x += "." + attrname - return '' % (modname, x) - - def __getattribute__(self, name): - try: - return getattr(getmod(), name) - except ImportError: - return None - - def __setattr__(self, name, value): - setattr(getmod(), name, value) - - def __delattr__(self, name): - delattr(getmod(), name) - - return AliasModule(str(modname)) +""" +apipkg: control the exported namespace of a python package. + +see http://pypi.python.org/pypi/apipkg + +(c) holger krekel, 2009 - MIT license +""" +import os +import sys +from types import ModuleType + +__version__ = '1.3.dev' + +def _py_abspath(path): + """ + special version of abspath + that will leave paths from jython jars alone + """ + if path.startswith('__pyclasspath__'): + + return path + else: + return os.path.abspath(path) + +def initpkg(pkgname, exportdefs, attr=dict()): + """ initialize given package from the export definitions. 
""" + oldmod = sys.modules.get(pkgname) + d = {} + f = getattr(oldmod, '__file__', None) + if f: + f = _py_abspath(f) + d['__file__'] = f + if hasattr(oldmod, '__version__'): + d['__version__'] = oldmod.__version__ + if hasattr(oldmod, '__loader__'): + d['__loader__'] = oldmod.__loader__ + if hasattr(oldmod, '__path__'): + d['__path__'] = [_py_abspath(p) for p in oldmod.__path__] + if '__doc__' not in exportdefs and getattr(oldmod, '__doc__', None): + d['__doc__'] = oldmod.__doc__ + d.update(attr) + if hasattr(oldmod, "__dict__"): + oldmod.__dict__.update(d) + mod = ApiModule(pkgname, exportdefs, implprefix=pkgname, attr=d) + sys.modules[pkgname] = mod + +def importobj(modpath, attrname): + module = __import__(modpath, None, None, ['__doc__']) + if not attrname: + return module + + retval = module + names = attrname.split(".") + for x in names: + retval = getattr(retval, x) + return retval + +class ApiModule(ModuleType): + def __docget(self): + try: + return self.__doc + except AttributeError: + if '__doc__' in self.__map__: + return self.__makeattr('__doc__') + def __docset(self, value): + self.__doc = value + __doc__ = property(__docget, __docset) + + def __init__(self, name, importspec, implprefix=None, attr=None): + self.__name__ = name + self.__all__ = [x for x in importspec if x != '__onfirstaccess__'] + self.__map__ = {} + self.__implprefix__ = implprefix or name + if attr: + for name, val in attr.items(): + # print "setting", self.__name__, name, val + setattr(self, name, val) + for name, importspec in importspec.items(): + if isinstance(importspec, dict): + subname = '%s.%s' % (self.__name__, name) + apimod = ApiModule(subname, importspec, implprefix) + sys.modules[subname] = apimod + setattr(self, name, apimod) + else: + parts = importspec.split(':') + modpath = parts.pop(0) + attrname = parts and parts[0] or "" + if modpath[0] == '.': + modpath = implprefix + modpath + + if not attrname: + subname = '%s.%s' % (self.__name__, name) + apimod = 
AliasModule(subname, modpath) + sys.modules[subname] = apimod + if '.' not in name: + setattr(self, name, apimod) + else: + self.__map__[name] = (modpath, attrname) + + def __repr__(self): + l = [] + if hasattr(self, '__version__'): + l.append("version=" + repr(self.__version__)) + if hasattr(self, '__file__'): + l.append('from ' + repr(self.__file__)) + if l: + return '' % (self.__name__, " ".join(l)) + return '' % (self.__name__,) + + def __makeattr(self, name): + """lazily compute value for name or raise AttributeError if unknown.""" + # print "makeattr", self.__name__, name + target = None + if '__onfirstaccess__' in self.__map__: + target = self.__map__.pop('__onfirstaccess__') + importobj(*target)() + try: + modpath, attrname = self.__map__[name] + except KeyError: + if target is not None and name != '__onfirstaccess__': + # retry, onfirstaccess might have set attrs + return getattr(self, name) + raise AttributeError(name) + else: + result = importobj(modpath, attrname) + setattr(self, name, result) + try: + del self.__map__[name] + except KeyError: + pass # in a recursive-import situation a double-del can happen + return result + + __getattr__ = __makeattr + + def __dict__(self): + # force all the content of the module to be loaded when __dict__ is read + dictdescr = ModuleType.__dict__['__dict__'] + dict = dictdescr.__get__(self) + if dict is not None: + hasattr(self, 'some') + for name in self.__all__: + try: + self.__makeattr(name) + except AttributeError: + pass + return dict + __dict__ = property(__dict__) + + +def AliasModule(modname, modpath, attrname=None): + mod = [] + + def getmod(): + if not mod: + x = importobj(modpath, None) + if attrname is not None: + x = getattr(x, attrname) + mod.append(x) + return mod[0] + + class AliasModule(ModuleType): + + def __repr__(self): + x = modpath + if attrname: + x += "." 
+ attrname + return '' % (modname, x) + + def __getattribute__(self, name): + try: + return getattr(getmod(), name) + except ImportError: + return None + + def __setattr__(self, name, value): + setattr(getmod(), name, value) + + def __delattr__(self, name): + delattr(getmod(), name) + + return AliasModule(str(modname)) diff --git a/third_party/python/py/py/_builtin.py b/third_party/python/py/py/_builtin.py index 52ee9d79cadce..6199afadd49fe 100644 --- a/third_party/python/py/py/_builtin.py +++ b/third_party/python/py/py/_builtin.py @@ -1,248 +1,248 @@ -import sys - -try: - reversed = reversed -except NameError: - def reversed(sequence): - """reversed(sequence) -> reverse iterator over values of the sequence - - Return a reverse iterator - """ - if hasattr(sequence, '__reversed__'): - return sequence.__reversed__() - if not hasattr(sequence, '__getitem__'): - raise TypeError("argument to reversed() must be a sequence") - return reversed_iterator(sequence) - - class reversed_iterator(object): - - def __init__(self, seq): - self.seq = seq - self.remaining = len(seq) - - def __iter__(self): - return self - - def next(self): - i = self.remaining - if i > 0: - i -= 1 - item = self.seq[i] - self.remaining = i - return item - raise StopIteration - - def __length_hint__(self): - return self.remaining - -try: - any = any -except NameError: - def any(iterable): - for x in iterable: - if x: - return True - return False - -try: - all = all -except NameError: - def all(iterable): - for x in iterable: - if not x: - return False - return True - -try: - sorted = sorted -except NameError: - builtin_cmp = cmp # need to use cmp as keyword arg - - def sorted(iterable, cmp=None, key=None, reverse=0): - use_cmp = None - if key is not None: - if cmp is None: - def use_cmp(x, y): - return builtin_cmp(x[0], y[0]) - else: - def use_cmp(x, y): - return cmp(x[0], y[0]) - l = [(key(element), element) for element in iterable] - else: - if cmp is not None: - use_cmp = cmp - l = list(iterable) - 
if use_cmp is not None: - l.sort(use_cmp) - else: - l.sort() - if reverse: - l.reverse() - if key is not None: - return [element for (_, element) in l] - return l - -try: - set, frozenset = set, frozenset -except NameError: - from sets import set, frozenset - -# pass through -enumerate = enumerate - -try: - BaseException = BaseException -except NameError: - BaseException = Exception - -try: - GeneratorExit = GeneratorExit -except NameError: - class GeneratorExit(Exception): - """ This exception is never raised, it is there to make it possible to - write code compatible with CPython 2.5 even in lower CPython - versions.""" - pass - GeneratorExit.__module__ = 'exceptions' - -_sysex = (KeyboardInterrupt, SystemExit, MemoryError, GeneratorExit) - -try: - callable = callable -except NameError: - def callable(obj): - return hasattr(obj, "__call__") - -if sys.version_info >= (3, 0): - exec ("print_ = print ; exec_=exec") - import builtins - - # some backward compatibility helpers - _basestring = str - def _totext(obj, encoding=None, errors=None): - if isinstance(obj, bytes): - if errors is None: - obj = obj.decode(encoding) - else: - obj = obj.decode(encoding, errors) - elif not isinstance(obj, str): - obj = str(obj) - return obj - - def _isbytes(x): - return isinstance(x, bytes) - def _istext(x): - return isinstance(x, str) - - text = str - bytes = bytes - - - def _getimself(function): - return getattr(function, '__self__', None) - - def _getfuncdict(function): - return getattr(function, "__dict__", None) - - def _getcode(function): - return getattr(function, "__code__", None) - - def execfile(fn, globs=None, locs=None): - if globs is None: - back = sys._getframe(1) - globs = back.f_globals - locs = back.f_locals - del back - elif locs is None: - locs = globs - fp = open(fn, "r") - try: - source = fp.read() - finally: - fp.close() - co = compile(source, fn, "exec", dont_inherit=True) - exec_(co, globs, locs) - -else: - import __builtin__ as builtins - _totext = unicode - 
_basestring = basestring - text = unicode - bytes = str - execfile = execfile - callable = callable - def _isbytes(x): - return isinstance(x, str) - def _istext(x): - return isinstance(x, unicode) - - def _getimself(function): - return getattr(function, 'im_self', None) - - def _getfuncdict(function): - return getattr(function, "__dict__", None) - - def _getcode(function): - try: - return getattr(function, "__code__") - except AttributeError: - return getattr(function, "func_code", None) - - def print_(*args, **kwargs): - """ minimal backport of py3k print statement. """ - sep = ' ' - if 'sep' in kwargs: - sep = kwargs.pop('sep') - end = '\n' - if 'end' in kwargs: - end = kwargs.pop('end') - file = 'file' in kwargs and kwargs.pop('file') or sys.stdout - if kwargs: - args = ", ".join([str(x) for x in kwargs]) - raise TypeError("invalid keyword arguments: %s" % args) - at_start = True - for x in args: - if not at_start: - file.write(sep) - file.write(str(x)) - at_start = False - file.write(end) - - def exec_(obj, globals=None, locals=None): - """ minimal backport of py3k exec statement. """ - __tracebackhide__ = True - if globals is None: - frame = sys._getframe(1) - globals = frame.f_globals - if locals is None: - locals = frame.f_locals - elif locals is None: - locals = globals - exec2(obj, globals, locals) - -if sys.version_info >= (3, 0): - def _reraise(cls, val, tb): - __tracebackhide__ = True - assert hasattr(val, '__traceback__') - raise cls.with_traceback(val, tb) -else: - exec (""" -def _reraise(cls, val, tb): - __tracebackhide__ = True - raise cls, val, tb -def exec2(obj, globals, locals): - __tracebackhide__ = True - exec obj in globals, locals -""") - -def _tryimport(*names): - """ return the first successfully imported module. 
""" - assert names - for name in names: - try: - __import__(name) - except ImportError: - excinfo = sys.exc_info() - else: - return sys.modules[name] - _reraise(*excinfo) +import sys + +try: + reversed = reversed +except NameError: + def reversed(sequence): + """reversed(sequence) -> reverse iterator over values of the sequence + + Return a reverse iterator + """ + if hasattr(sequence, '__reversed__'): + return sequence.__reversed__() + if not hasattr(sequence, '__getitem__'): + raise TypeError("argument to reversed() must be a sequence") + return reversed_iterator(sequence) + + class reversed_iterator(object): + + def __init__(self, seq): + self.seq = seq + self.remaining = len(seq) + + def __iter__(self): + return self + + def next(self): + i = self.remaining + if i > 0: + i -= 1 + item = self.seq[i] + self.remaining = i + return item + raise StopIteration + + def __length_hint__(self): + return self.remaining + +try: + any = any +except NameError: + def any(iterable): + for x in iterable: + if x: + return True + return False + +try: + all = all +except NameError: + def all(iterable): + for x in iterable: + if not x: + return False + return True + +try: + sorted = sorted +except NameError: + builtin_cmp = cmp # need to use cmp as keyword arg + + def sorted(iterable, cmp=None, key=None, reverse=0): + use_cmp = None + if key is not None: + if cmp is None: + def use_cmp(x, y): + return builtin_cmp(x[0], y[0]) + else: + def use_cmp(x, y): + return cmp(x[0], y[0]) + l = [(key(element), element) for element in iterable] + else: + if cmp is not None: + use_cmp = cmp + l = list(iterable) + if use_cmp is not None: + l.sort(use_cmp) + else: + l.sort() + if reverse: + l.reverse() + if key is not None: + return [element for (_, element) in l] + return l + +try: + set, frozenset = set, frozenset +except NameError: + from sets import set, frozenset + +# pass through +enumerate = enumerate + +try: + BaseException = BaseException +except NameError: + BaseException = Exception + 
+try: + GeneratorExit = GeneratorExit +except NameError: + class GeneratorExit(Exception): + """ This exception is never raised, it is there to make it possible to + write code compatible with CPython 2.5 even in lower CPython + versions.""" + pass + GeneratorExit.__module__ = 'exceptions' + +_sysex = (KeyboardInterrupt, SystemExit, MemoryError, GeneratorExit) + +try: + callable = callable +except NameError: + def callable(obj): + return hasattr(obj, "__call__") + +if sys.version_info >= (3, 0): + exec ("print_ = print ; exec_=exec") + import builtins + + # some backward compatibility helpers + _basestring = str + def _totext(obj, encoding=None, errors=None): + if isinstance(obj, bytes): + if errors is None: + obj = obj.decode(encoding) + else: + obj = obj.decode(encoding, errors) + elif not isinstance(obj, str): + obj = str(obj) + return obj + + def _isbytes(x): + return isinstance(x, bytes) + def _istext(x): + return isinstance(x, str) + + text = str + bytes = bytes + + + def _getimself(function): + return getattr(function, '__self__', None) + + def _getfuncdict(function): + return getattr(function, "__dict__", None) + + def _getcode(function): + return getattr(function, "__code__", None) + + def execfile(fn, globs=None, locs=None): + if globs is None: + back = sys._getframe(1) + globs = back.f_globals + locs = back.f_locals + del back + elif locs is None: + locs = globs + fp = open(fn, "r") + try: + source = fp.read() + finally: + fp.close() + co = compile(source, fn, "exec", dont_inherit=True) + exec_(co, globs, locs) + +else: + import __builtin__ as builtins + _totext = unicode + _basestring = basestring + text = unicode + bytes = str + execfile = execfile + callable = callable + def _isbytes(x): + return isinstance(x, str) + def _istext(x): + return isinstance(x, unicode) + + def _getimself(function): + return getattr(function, 'im_self', None) + + def _getfuncdict(function): + return getattr(function, "__dict__", None) + + def _getcode(function): + try: + 
return getattr(function, "__code__") + except AttributeError: + return getattr(function, "func_code", None) + + def print_(*args, **kwargs): + """ minimal backport of py3k print statement. """ + sep = ' ' + if 'sep' in kwargs: + sep = kwargs.pop('sep') + end = '\n' + if 'end' in kwargs: + end = kwargs.pop('end') + file = 'file' in kwargs and kwargs.pop('file') or sys.stdout + if kwargs: + args = ", ".join([str(x) for x in kwargs]) + raise TypeError("invalid keyword arguments: %s" % args) + at_start = True + for x in args: + if not at_start: + file.write(sep) + file.write(str(x)) + at_start = False + file.write(end) + + def exec_(obj, globals=None, locals=None): + """ minimal backport of py3k exec statement. """ + __tracebackhide__ = True + if globals is None: + frame = sys._getframe(1) + globals = frame.f_globals + if locals is None: + locals = frame.f_locals + elif locals is None: + locals = globals + exec2(obj, globals, locals) + +if sys.version_info >= (3, 0): + def _reraise(cls, val, tb): + __tracebackhide__ = True + assert hasattr(val, '__traceback__') + raise cls.with_traceback(val, tb) +else: + exec (""" +def _reraise(cls, val, tb): + __tracebackhide__ = True + raise cls, val, tb +def exec2(obj, globals, locals): + __tracebackhide__ = True + exec obj in globals, locals +""") + +def _tryimport(*names): + """ return the first successfully imported module. 
""" + assert names + for name in names: + try: + __import__(name) + except ImportError: + excinfo = sys.exc_info() + else: + return sys.modules[name] + _reraise(*excinfo) diff --git a/third_party/python/py/py/_code/__init__.py b/third_party/python/py/py/_code/__init__.py index f15acf8513211..152ff585f53eb 100644 --- a/third_party/python/py/py/_code/__init__.py +++ b/third_party/python/py/py/_code/__init__.py @@ -1 +1 @@ -""" python inspection/code generation API """ +""" python inspection/code generation API """ diff --git a/third_party/python/py/py/_code/_assertionnew.py b/third_party/python/py/py/_code/_assertionnew.py index afb1b31ff0592..e9b7050e4099c 100644 --- a/third_party/python/py/py/_code/_assertionnew.py +++ b/third_party/python/py/py/_code/_assertionnew.py @@ -1,339 +1,339 @@ -""" -Find intermediate evalutation results in assert statements through builtin AST. -This should replace _assertionold.py eventually. -""" - -import sys -import ast - -import py -from py._code.assertion import _format_explanation, BuiltinAssertionError - - -if sys.platform.startswith("java") and sys.version_info < (2, 5, 2): - # See http://bugs.jython.org/issue1497 - _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict", - "ListComp", "GeneratorExp", "Yield", "Compare", "Call", - "Repr", "Num", "Str", "Attribute", "Subscript", "Name", - "List", "Tuple") - _stmts = ("FunctionDef", "ClassDef", "Return", "Delete", "Assign", - "AugAssign", "Print", "For", "While", "If", "With", "Raise", - "TryExcept", "TryFinally", "Assert", "Import", "ImportFrom", - "Exec", "Global", "Expr", "Pass", "Break", "Continue") - _expr_nodes = set(getattr(ast, name) for name in _exprs) - _stmt_nodes = set(getattr(ast, name) for name in _stmts) - def _is_ast_expr(node): - return node.__class__ in _expr_nodes - def _is_ast_stmt(node): - return node.__class__ in _stmt_nodes -else: - def _is_ast_expr(node): - return isinstance(node, ast.expr) - def _is_ast_stmt(node): - return isinstance(node, 
ast.stmt) - - -class Failure(Exception): - """Error found while interpreting AST.""" - - def __init__(self, explanation=""): - self.cause = sys.exc_info() - self.explanation = explanation - - -def interpret(source, frame, should_fail=False): - mod = ast.parse(source) - visitor = DebugInterpreter(frame) - try: - visitor.visit(mod) - except Failure: - failure = sys.exc_info()[1] - return getfailure(failure) - if should_fail: - return ("(assertion failed, but when it was re-run for " - "printing intermediate values, it did not fail. Suggestions: " - "compute assert expression before the assert or use --no-assert)") - -def run(offending_line, frame=None): - if frame is None: - frame = py.code.Frame(sys._getframe(1)) - return interpret(offending_line, frame) - -def getfailure(failure): - explanation = _format_explanation(failure.explanation) - value = failure.cause[1] - if str(value): - lines = explanation.splitlines() - if not lines: - lines.append("") - lines[0] += " << %s" % (value,) - explanation = "\n".join(lines) - text = "%s: %s" % (failure.cause[0].__name__, explanation) - if text.startswith("AssertionError: assert "): - text = text[16:] - return text - - -operator_map = { - ast.BitOr : "|", - ast.BitXor : "^", - ast.BitAnd : "&", - ast.LShift : "<<", - ast.RShift : ">>", - ast.Add : "+", - ast.Sub : "-", - ast.Mult : "*", - ast.Div : "/", - ast.FloorDiv : "//", - ast.Mod : "%", - ast.Eq : "==", - ast.NotEq : "!=", - ast.Lt : "<", - ast.LtE : "<=", - ast.Gt : ">", - ast.GtE : ">=", - ast.Pow : "**", - ast.Is : "is", - ast.IsNot : "is not", - ast.In : "in", - ast.NotIn : "not in" -} - -unary_map = { - ast.Not : "not %s", - ast.Invert : "~%s", - ast.USub : "-%s", - ast.UAdd : "+%s" -} - - -class DebugInterpreter(ast.NodeVisitor): - """Interpret AST nodes to gleam useful debugging information. """ - - def __init__(self, frame): - self.frame = frame - - def generic_visit(self, node): - # Fallback when we don't have a special implementation. 
- if _is_ast_expr(node): - mod = ast.Expression(node) - co = self._compile(mod) - try: - result = self.frame.eval(co) - except Exception: - raise Failure() - explanation = self.frame.repr(result) - return explanation, result - elif _is_ast_stmt(node): - mod = ast.Module([node]) - co = self._compile(mod, "exec") - try: - self.frame.exec_(co) - except Exception: - raise Failure() - return None, None - else: - raise AssertionError("can't handle %s" %(node,)) - - def _compile(self, source, mode="eval"): - return compile(source, "", mode) - - def visit_Expr(self, expr): - return self.visit(expr.value) - - def visit_Module(self, mod): - for stmt in mod.body: - self.visit(stmt) - - def visit_Name(self, name): - explanation, result = self.generic_visit(name) - # See if the name is local. - source = "%r in locals() is not globals()" % (name.id,) - co = self._compile(source) - try: - local = self.frame.eval(co) - except Exception: - # have to assume it isn't - local = False - if not local: - return name.id, result - return explanation, result - - def visit_Compare(self, comp): - left = comp.left - left_explanation, left_result = self.visit(left) - for op, next_op in zip(comp.ops, comp.comparators): - next_explanation, next_result = self.visit(next_op) - op_symbol = operator_map[op.__class__] - explanation = "%s %s %s" % (left_explanation, op_symbol, - next_explanation) - source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,) - co = self._compile(source) - try: - result = self.frame.eval(co, __exprinfo_left=left_result, - __exprinfo_right=next_result) - except Exception: - raise Failure(explanation) - try: - if not result: - break - except KeyboardInterrupt: - raise - except: - break - left_explanation, left_result = next_explanation, next_result - - rcomp = py.code._reprcompare - if rcomp: - res = rcomp(op_symbol, left_result, next_result) - if res: - explanation = res - return explanation, result - - def visit_BoolOp(self, boolop): - is_or = isinstance(boolop.op, 
ast.Or) - explanations = [] - for operand in boolop.values: - explanation, result = self.visit(operand) - explanations.append(explanation) - if result == is_or: - break - name = is_or and " or " or " and " - explanation = "(" + name.join(explanations) + ")" - return explanation, result - - def visit_UnaryOp(self, unary): - pattern = unary_map[unary.op.__class__] - operand_explanation, operand_result = self.visit(unary.operand) - explanation = pattern % (operand_explanation,) - co = self._compile(pattern % ("__exprinfo_expr",)) - try: - result = self.frame.eval(co, __exprinfo_expr=operand_result) - except Exception: - raise Failure(explanation) - return explanation, result - - def visit_BinOp(self, binop): - left_explanation, left_result = self.visit(binop.left) - right_explanation, right_result = self.visit(binop.right) - symbol = operator_map[binop.op.__class__] - explanation = "(%s %s %s)" % (left_explanation, symbol, - right_explanation) - source = "__exprinfo_left %s __exprinfo_right" % (symbol,) - co = self._compile(source) - try: - result = self.frame.eval(co, __exprinfo_left=left_result, - __exprinfo_right=right_result) - except Exception: - raise Failure(explanation) - return explanation, result - - def visit_Call(self, call): - func_explanation, func = self.visit(call.func) - arg_explanations = [] - ns = {"__exprinfo_func" : func} - arguments = [] - for arg in call.args: - arg_explanation, arg_result = self.visit(arg) - arg_name = "__exprinfo_%s" % (len(ns),) - ns[arg_name] = arg_result - arguments.append(arg_name) - arg_explanations.append(arg_explanation) - for keyword in call.keywords: - arg_explanation, arg_result = self.visit(keyword.value) - arg_name = "__exprinfo_%s" % (len(ns),) - ns[arg_name] = arg_result - keyword_source = "%s=%%s" % (keyword.arg) - arguments.append(keyword_source % (arg_name,)) - arg_explanations.append(keyword_source % (arg_explanation,)) - if call.starargs: - arg_explanation, arg_result = self.visit(call.starargs) - arg_name = 
"__exprinfo_star" - ns[arg_name] = arg_result - arguments.append("*%s" % (arg_name,)) - arg_explanations.append("*%s" % (arg_explanation,)) - if call.kwargs: - arg_explanation, arg_result = self.visit(call.kwargs) - arg_name = "__exprinfo_kwds" - ns[arg_name] = arg_result - arguments.append("**%s" % (arg_name,)) - arg_explanations.append("**%s" % (arg_explanation,)) - args_explained = ", ".join(arg_explanations) - explanation = "%s(%s)" % (func_explanation, args_explained) - args = ", ".join(arguments) - source = "__exprinfo_func(%s)" % (args,) - co = self._compile(source) - try: - result = self.frame.eval(co, **ns) - except Exception: - raise Failure(explanation) - pattern = "%s\n{%s = %s\n}" - rep = self.frame.repr(result) - explanation = pattern % (rep, rep, explanation) - return explanation, result - - def _is_builtin_name(self, name): - pattern = "%r not in globals() and %r not in locals()" - source = pattern % (name.id, name.id) - co = self._compile(source) - try: - return self.frame.eval(co) - except Exception: - return False - - def visit_Attribute(self, attr): - if not isinstance(attr.ctx, ast.Load): - return self.generic_visit(attr) - source_explanation, source_result = self.visit(attr.value) - explanation = "%s.%s" % (source_explanation, attr.attr) - source = "__exprinfo_expr.%s" % (attr.attr,) - co = self._compile(source) - try: - result = self.frame.eval(co, __exprinfo_expr=source_result) - except Exception: - raise Failure(explanation) - explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result), - self.frame.repr(result), - source_explanation, attr.attr) - # Check if the attr is from an instance. 
- source = "%r in getattr(__exprinfo_expr, '__dict__', {})" - source = source % (attr.attr,) - co = self._compile(source) - try: - from_instance = self.frame.eval(co, __exprinfo_expr=source_result) - except Exception: - from_instance = True - if from_instance: - rep = self.frame.repr(result) - pattern = "%s\n{%s = %s\n}" - explanation = pattern % (rep, rep, explanation) - return explanation, result - - def visit_Assert(self, assrt): - test_explanation, test_result = self.visit(assrt.test) - if test_explanation.startswith("False\n{False =") and \ - test_explanation.endswith("\n"): - test_explanation = test_explanation[15:-2] - explanation = "assert %s" % (test_explanation,) - if not test_result: - try: - raise BuiltinAssertionError - except Exception: - raise Failure(explanation) - return explanation, test_result - - def visit_Assign(self, assign): - value_explanation, value_result = self.visit(assign.value) - explanation = "... = %s" % (value_explanation,) - name = ast.Name("__exprinfo_expr", ast.Load(), - lineno=assign.value.lineno, - col_offset=assign.value.col_offset) - new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno, - col_offset=assign.col_offset) - mod = ast.Module([new_assign]) - co = self._compile(mod, "exec") - try: - self.frame.exec_(co, __exprinfo_expr=value_result) - except Exception: - raise Failure(explanation) - return explanation, value_result +""" +Find intermediate evalutation results in assert statements through builtin AST. +This should replace _assertionold.py eventually. 
+""" + +import sys +import ast + +import py +from py._code.assertion import _format_explanation, BuiltinAssertionError + + +if sys.platform.startswith("java") and sys.version_info < (2, 5, 2): + # See http://bugs.jython.org/issue1497 + _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict", + "ListComp", "GeneratorExp", "Yield", "Compare", "Call", + "Repr", "Num", "Str", "Attribute", "Subscript", "Name", + "List", "Tuple") + _stmts = ("FunctionDef", "ClassDef", "Return", "Delete", "Assign", + "AugAssign", "Print", "For", "While", "If", "With", "Raise", + "TryExcept", "TryFinally", "Assert", "Import", "ImportFrom", + "Exec", "Global", "Expr", "Pass", "Break", "Continue") + _expr_nodes = set(getattr(ast, name) for name in _exprs) + _stmt_nodes = set(getattr(ast, name) for name in _stmts) + def _is_ast_expr(node): + return node.__class__ in _expr_nodes + def _is_ast_stmt(node): + return node.__class__ in _stmt_nodes +else: + def _is_ast_expr(node): + return isinstance(node, ast.expr) + def _is_ast_stmt(node): + return isinstance(node, ast.stmt) + + +class Failure(Exception): + """Error found while interpreting AST.""" + + def __init__(self, explanation=""): + self.cause = sys.exc_info() + self.explanation = explanation + + +def interpret(source, frame, should_fail=False): + mod = ast.parse(source) + visitor = DebugInterpreter(frame) + try: + visitor.visit(mod) + except Failure: + failure = sys.exc_info()[1] + return getfailure(failure) + if should_fail: + return ("(assertion failed, but when it was re-run for " + "printing intermediate values, it did not fail. 
Suggestions: " + "compute assert expression before the assert or use --no-assert)") + +def run(offending_line, frame=None): + if frame is None: + frame = py.code.Frame(sys._getframe(1)) + return interpret(offending_line, frame) + +def getfailure(failure): + explanation = _format_explanation(failure.explanation) + value = failure.cause[1] + if str(value): + lines = explanation.splitlines() + if not lines: + lines.append("") + lines[0] += " << %s" % (value,) + explanation = "\n".join(lines) + text = "%s: %s" % (failure.cause[0].__name__, explanation) + if text.startswith("AssertionError: assert "): + text = text[16:] + return text + + +operator_map = { + ast.BitOr : "|", + ast.BitXor : "^", + ast.BitAnd : "&", + ast.LShift : "<<", + ast.RShift : ">>", + ast.Add : "+", + ast.Sub : "-", + ast.Mult : "*", + ast.Div : "/", + ast.FloorDiv : "//", + ast.Mod : "%", + ast.Eq : "==", + ast.NotEq : "!=", + ast.Lt : "<", + ast.LtE : "<=", + ast.Gt : ">", + ast.GtE : ">=", + ast.Pow : "**", + ast.Is : "is", + ast.IsNot : "is not", + ast.In : "in", + ast.NotIn : "not in" +} + +unary_map = { + ast.Not : "not %s", + ast.Invert : "~%s", + ast.USub : "-%s", + ast.UAdd : "+%s" +} + + +class DebugInterpreter(ast.NodeVisitor): + """Interpret AST nodes to gleam useful debugging information. """ + + def __init__(self, frame): + self.frame = frame + + def generic_visit(self, node): + # Fallback when we don't have a special implementation. 
+ if _is_ast_expr(node): + mod = ast.Expression(node) + co = self._compile(mod) + try: + result = self.frame.eval(co) + except Exception: + raise Failure() + explanation = self.frame.repr(result) + return explanation, result + elif _is_ast_stmt(node): + mod = ast.Module([node]) + co = self._compile(mod, "exec") + try: + self.frame.exec_(co) + except Exception: + raise Failure() + return None, None + else: + raise AssertionError("can't handle %s" %(node,)) + + def _compile(self, source, mode="eval"): + return compile(source, "", mode) + + def visit_Expr(self, expr): + return self.visit(expr.value) + + def visit_Module(self, mod): + for stmt in mod.body: + self.visit(stmt) + + def visit_Name(self, name): + explanation, result = self.generic_visit(name) + # See if the name is local. + source = "%r in locals() is not globals()" % (name.id,) + co = self._compile(source) + try: + local = self.frame.eval(co) + except Exception: + # have to assume it isn't + local = False + if not local: + return name.id, result + return explanation, result + + def visit_Compare(self, comp): + left = comp.left + left_explanation, left_result = self.visit(left) + for op, next_op in zip(comp.ops, comp.comparators): + next_explanation, next_result = self.visit(next_op) + op_symbol = operator_map[op.__class__] + explanation = "%s %s %s" % (left_explanation, op_symbol, + next_explanation) + source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,) + co = self._compile(source) + try: + result = self.frame.eval(co, __exprinfo_left=left_result, + __exprinfo_right=next_result) + except Exception: + raise Failure(explanation) + try: + if not result: + break + except KeyboardInterrupt: + raise + except: + break + left_explanation, left_result = next_explanation, next_result + + rcomp = py.code._reprcompare + if rcomp: + res = rcomp(op_symbol, left_result, next_result) + if res: + explanation = res + return explanation, result + + def visit_BoolOp(self, boolop): + is_or = isinstance(boolop.op, 
ast.Or) + explanations = [] + for operand in boolop.values: + explanation, result = self.visit(operand) + explanations.append(explanation) + if result == is_or: + break + name = is_or and " or " or " and " + explanation = "(" + name.join(explanations) + ")" + return explanation, result + + def visit_UnaryOp(self, unary): + pattern = unary_map[unary.op.__class__] + operand_explanation, operand_result = self.visit(unary.operand) + explanation = pattern % (operand_explanation,) + co = self._compile(pattern % ("__exprinfo_expr",)) + try: + result = self.frame.eval(co, __exprinfo_expr=operand_result) + except Exception: + raise Failure(explanation) + return explanation, result + + def visit_BinOp(self, binop): + left_explanation, left_result = self.visit(binop.left) + right_explanation, right_result = self.visit(binop.right) + symbol = operator_map[binop.op.__class__] + explanation = "(%s %s %s)" % (left_explanation, symbol, + right_explanation) + source = "__exprinfo_left %s __exprinfo_right" % (symbol,) + co = self._compile(source) + try: + result = self.frame.eval(co, __exprinfo_left=left_result, + __exprinfo_right=right_result) + except Exception: + raise Failure(explanation) + return explanation, result + + def visit_Call(self, call): + func_explanation, func = self.visit(call.func) + arg_explanations = [] + ns = {"__exprinfo_func" : func} + arguments = [] + for arg in call.args: + arg_explanation, arg_result = self.visit(arg) + arg_name = "__exprinfo_%s" % (len(ns),) + ns[arg_name] = arg_result + arguments.append(arg_name) + arg_explanations.append(arg_explanation) + for keyword in call.keywords: + arg_explanation, arg_result = self.visit(keyword.value) + arg_name = "__exprinfo_%s" % (len(ns),) + ns[arg_name] = arg_result + keyword_source = "%s=%%s" % (keyword.arg) + arguments.append(keyword_source % (arg_name,)) + arg_explanations.append(keyword_source % (arg_explanation,)) + if call.starargs: + arg_explanation, arg_result = self.visit(call.starargs) + arg_name = 
"__exprinfo_star" + ns[arg_name] = arg_result + arguments.append("*%s" % (arg_name,)) + arg_explanations.append("*%s" % (arg_explanation,)) + if call.kwargs: + arg_explanation, arg_result = self.visit(call.kwargs) + arg_name = "__exprinfo_kwds" + ns[arg_name] = arg_result + arguments.append("**%s" % (arg_name,)) + arg_explanations.append("**%s" % (arg_explanation,)) + args_explained = ", ".join(arg_explanations) + explanation = "%s(%s)" % (func_explanation, args_explained) + args = ", ".join(arguments) + source = "__exprinfo_func(%s)" % (args,) + co = self._compile(source) + try: + result = self.frame.eval(co, **ns) + except Exception: + raise Failure(explanation) + pattern = "%s\n{%s = %s\n}" + rep = self.frame.repr(result) + explanation = pattern % (rep, rep, explanation) + return explanation, result + + def _is_builtin_name(self, name): + pattern = "%r not in globals() and %r not in locals()" + source = pattern % (name.id, name.id) + co = self._compile(source) + try: + return self.frame.eval(co) + except Exception: + return False + + def visit_Attribute(self, attr): + if not isinstance(attr.ctx, ast.Load): + return self.generic_visit(attr) + source_explanation, source_result = self.visit(attr.value) + explanation = "%s.%s" % (source_explanation, attr.attr) + source = "__exprinfo_expr.%s" % (attr.attr,) + co = self._compile(source) + try: + result = self.frame.eval(co, __exprinfo_expr=source_result) + except Exception: + raise Failure(explanation) + explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result), + self.frame.repr(result), + source_explanation, attr.attr) + # Check if the attr is from an instance. 
+ source = "%r in getattr(__exprinfo_expr, '__dict__', {})" + source = source % (attr.attr,) + co = self._compile(source) + try: + from_instance = self.frame.eval(co, __exprinfo_expr=source_result) + except Exception: + from_instance = True + if from_instance: + rep = self.frame.repr(result) + pattern = "%s\n{%s = %s\n}" + explanation = pattern % (rep, rep, explanation) + return explanation, result + + def visit_Assert(self, assrt): + test_explanation, test_result = self.visit(assrt.test) + if test_explanation.startswith("False\n{False =") and \ + test_explanation.endswith("\n"): + test_explanation = test_explanation[15:-2] + explanation = "assert %s" % (test_explanation,) + if not test_result: + try: + raise BuiltinAssertionError + except Exception: + raise Failure(explanation) + return explanation, test_result + + def visit_Assign(self, assign): + value_explanation, value_result = self.visit(assign.value) + explanation = "... = %s" % (value_explanation,) + name = ast.Name("__exprinfo_expr", ast.Load(), + lineno=assign.value.lineno, + col_offset=assign.value.col_offset) + new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno, + col_offset=assign.col_offset) + mod = ast.Module([new_assign]) + co = self._compile(mod, "exec") + try: + self.frame.exec_(co, __exprinfo_expr=value_result) + except Exception: + raise Failure(explanation) + return explanation, value_result diff --git a/third_party/python/py/py/_code/_assertionold.py b/third_party/python/py/py/_code/_assertionold.py index 4e81fb3ef6ec7..98786e4853ed5 100644 --- a/third_party/python/py/py/_code/_assertionold.py +++ b/third_party/python/py/py/_code/_assertionold.py @@ -1,555 +1,555 @@ -import py -import sys, inspect -from compiler import parse, ast, pycodegen -from py._code.assertion import BuiltinAssertionError, _format_explanation - -passthroughex = py.builtin._sysex - -class Failure: - def __init__(self, node): - self.exc, self.value, self.tb = sys.exc_info() - self.node = node - -class 
View(object): - """View base class. - - If C is a subclass of View, then C(x) creates a proxy object around - the object x. The actual class of the proxy is not C in general, - but a *subclass* of C determined by the rules below. To avoid confusion - we call view class the class of the proxy (a subclass of C, so of View) - and object class the class of x. - - Attributes and methods not found in the proxy are automatically read on x. - Other operations like setting attributes are performed on the proxy, as - determined by its view class. The object x is available from the proxy - as its __obj__ attribute. - - The view class selection is determined by the __view__ tuples and the - optional __viewkey__ method. By default, the selected view class is the - most specific subclass of C whose __view__ mentions the class of x. - If no such subclass is found, the search proceeds with the parent - object classes. For example, C(True) will first look for a subclass - of C with __view__ = (..., bool, ...) and only if it doesn't find any - look for one with __view__ = (..., int, ...), and then ..., object,... - If everything fails the class C itself is considered to be the default. - - Alternatively, the view class selection can be driven by another aspect - of the object x, instead of the class of x, by overriding __viewkey__. - See last example at the end of this module. 
- """ - - _viewcache = {} - __view__ = () - - def __new__(rootclass, obj, *args, **kwds): - self = object.__new__(rootclass) - self.__obj__ = obj - self.__rootclass__ = rootclass - key = self.__viewkey__() - try: - self.__class__ = self._viewcache[key] - except KeyError: - self.__class__ = self._selectsubclass(key) - return self - - def __getattr__(self, attr): - # attributes not found in the normal hierarchy rooted on View - # are looked up in the object's real class - return getattr(self.__obj__, attr) - - def __viewkey__(self): - return self.__obj__.__class__ - - def __matchkey__(self, key, subclasses): - if inspect.isclass(key): - keys = inspect.getmro(key) - else: - keys = [key] - for key in keys: - result = [C for C in subclasses if key in C.__view__] - if result: - return result - return [] - - def _selectsubclass(self, key): - subclasses = list(enumsubclasses(self.__rootclass__)) - for C in subclasses: - if not isinstance(C.__view__, tuple): - C.__view__ = (C.__view__,) - choices = self.__matchkey__(key, subclasses) - if not choices: - return self.__rootclass__ - elif len(choices) == 1: - return choices[0] - else: - # combine the multiple choices - return type('?', tuple(choices), {}) - - def __repr__(self): - return '%s(%r)' % (self.__rootclass__.__name__, self.__obj__) - - -def enumsubclasses(cls): - for subcls in cls.__subclasses__(): - for subsubclass in enumsubclasses(subcls): - yield subsubclass - yield cls - - -class Interpretable(View): - """A parse tree node with a few extra methods.""" - explanation = None - - def is_builtin(self, frame): - return False - - def eval(self, frame): - # fall-back for unknown expression nodes - try: - expr = ast.Expression(self.__obj__) - expr.filename = '' - self.__obj__.filename = '' - co = pycodegen.ExpressionCodeGenerator(expr).getCode() - result = frame.eval(co) - except passthroughex: - raise - except: - raise Failure(self) - self.result = result - self.explanation = self.explanation or frame.repr(self.result) - 
- def run(self, frame): - # fall-back for unknown statement nodes - try: - expr = ast.Module(None, ast.Stmt([self.__obj__])) - expr.filename = '' - co = pycodegen.ModuleCodeGenerator(expr).getCode() - frame.exec_(co) - except passthroughex: - raise - except: - raise Failure(self) - - def nice_explanation(self): - return _format_explanation(self.explanation) - - -class Name(Interpretable): - __view__ = ast.Name - - def is_local(self, frame): - source = '%r in locals() is not globals()' % self.name - try: - return frame.is_true(frame.eval(source)) - except passthroughex: - raise - except: - return False - - def is_global(self, frame): - source = '%r in globals()' % self.name - try: - return frame.is_true(frame.eval(source)) - except passthroughex: - raise - except: - return False - - def is_builtin(self, frame): - source = '%r not in locals() and %r not in globals()' % ( - self.name, self.name) - try: - return frame.is_true(frame.eval(source)) - except passthroughex: - raise - except: - return False - - def eval(self, frame): - super(Name, self).eval(frame) - if not self.is_local(frame): - self.explanation = self.name - -class Compare(Interpretable): - __view__ = ast.Compare - - def eval(self, frame): - expr = Interpretable(self.expr) - expr.eval(frame) - for operation, expr2 in self.ops: - if hasattr(self, 'result'): - # shortcutting in chained expressions - if not frame.is_true(self.result): - break - expr2 = Interpretable(expr2) - expr2.eval(frame) - self.explanation = "%s %s %s" % ( - expr.explanation, operation, expr2.explanation) - source = "__exprinfo_left %s __exprinfo_right" % operation - try: - self.result = frame.eval(source, - __exprinfo_left=expr.result, - __exprinfo_right=expr2.result) - except passthroughex: - raise - except: - raise Failure(self) - expr = expr2 - -class And(Interpretable): - __view__ = ast.And - - def eval(self, frame): - explanations = [] - for expr in self.nodes: - expr = Interpretable(expr) - expr.eval(frame) - 
explanations.append(expr.explanation) - self.result = expr.result - if not frame.is_true(expr.result): - break - self.explanation = '(' + ' and '.join(explanations) + ')' - -class Or(Interpretable): - __view__ = ast.Or - - def eval(self, frame): - explanations = [] - for expr in self.nodes: - expr = Interpretable(expr) - expr.eval(frame) - explanations.append(expr.explanation) - self.result = expr.result - if frame.is_true(expr.result): - break - self.explanation = '(' + ' or '.join(explanations) + ')' - - -# == Unary operations == -keepalive = [] -for astclass, astpattern in { - ast.Not : 'not __exprinfo_expr', - ast.Invert : '(~__exprinfo_expr)', - }.items(): - - class UnaryArith(Interpretable): - __view__ = astclass - - def eval(self, frame, astpattern=astpattern): - expr = Interpretable(self.expr) - expr.eval(frame) - self.explanation = astpattern.replace('__exprinfo_expr', - expr.explanation) - try: - self.result = frame.eval(astpattern, - __exprinfo_expr=expr.result) - except passthroughex: - raise - except: - raise Failure(self) - - keepalive.append(UnaryArith) - -# == Binary operations == -for astclass, astpattern in { - ast.Add : '(__exprinfo_left + __exprinfo_right)', - ast.Sub : '(__exprinfo_left - __exprinfo_right)', - ast.Mul : '(__exprinfo_left * __exprinfo_right)', - ast.Div : '(__exprinfo_left / __exprinfo_right)', - ast.Mod : '(__exprinfo_left % __exprinfo_right)', - ast.Power : '(__exprinfo_left ** __exprinfo_right)', - }.items(): - - class BinaryArith(Interpretable): - __view__ = astclass - - def eval(self, frame, astpattern=astpattern): - left = Interpretable(self.left) - left.eval(frame) - right = Interpretable(self.right) - right.eval(frame) - self.explanation = (astpattern - .replace('__exprinfo_left', left .explanation) - .replace('__exprinfo_right', right.explanation)) - try: - self.result = frame.eval(astpattern, - __exprinfo_left=left.result, - __exprinfo_right=right.result) - except passthroughex: - raise - except: - raise Failure(self) 
- - keepalive.append(BinaryArith) - - -class CallFunc(Interpretable): - __view__ = ast.CallFunc - - def is_bool(self, frame): - source = 'isinstance(__exprinfo_value, bool)' - try: - return frame.is_true(frame.eval(source, - __exprinfo_value=self.result)) - except passthroughex: - raise - except: - return False - - def eval(self, frame): - node = Interpretable(self.node) - node.eval(frame) - explanations = [] - vars = {'__exprinfo_fn': node.result} - source = '__exprinfo_fn(' - for a in self.args: - if isinstance(a, ast.Keyword): - keyword = a.name - a = a.expr - else: - keyword = None - a = Interpretable(a) - a.eval(frame) - argname = '__exprinfo_%d' % len(vars) - vars[argname] = a.result - if keyword is None: - source += argname + ',' - explanations.append(a.explanation) - else: - source += '%s=%s,' % (keyword, argname) - explanations.append('%s=%s' % (keyword, a.explanation)) - if self.star_args: - star_args = Interpretable(self.star_args) - star_args.eval(frame) - argname = '__exprinfo_star' - vars[argname] = star_args.result - source += '*' + argname + ',' - explanations.append('*' + star_args.explanation) - if self.dstar_args: - dstar_args = Interpretable(self.dstar_args) - dstar_args.eval(frame) - argname = '__exprinfo_kwds' - vars[argname] = dstar_args.result - source += '**' + argname + ',' - explanations.append('**' + dstar_args.explanation) - self.explanation = "%s(%s)" % ( - node.explanation, ', '.join(explanations)) - if source.endswith(','): - source = source[:-1] - source += ')' - try: - self.result = frame.eval(source, **vars) - except passthroughex: - raise - except: - raise Failure(self) - if not node.is_builtin(frame) or not self.is_bool(frame): - r = frame.repr(self.result) - self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation) - -class Getattr(Interpretable): - __view__ = ast.Getattr - - def eval(self, frame): - expr = Interpretable(self.expr) - expr.eval(frame) - source = '__exprinfo_expr.%s' % self.attrname - try: - self.result = 
frame.eval(source, __exprinfo_expr=expr.result) - except passthroughex: - raise - except: - raise Failure(self) - self.explanation = '%s.%s' % (expr.explanation, self.attrname) - # if the attribute comes from the instance, its value is interesting - source = ('hasattr(__exprinfo_expr, "__dict__") and ' - '%r in __exprinfo_expr.__dict__' % self.attrname) - try: - from_instance = frame.is_true( - frame.eval(source, __exprinfo_expr=expr.result)) - except passthroughex: - raise - except: - from_instance = True - if from_instance: - r = frame.repr(self.result) - self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation) - -# == Re-interpretation of full statements == - -class Assert(Interpretable): - __view__ = ast.Assert - - def run(self, frame): - test = Interpretable(self.test) - test.eval(frame) - # simplify 'assert False where False = ...' - if (test.explanation.startswith('False\n{False = ') and - test.explanation.endswith('\n}')): - test.explanation = test.explanation[15:-2] - # print the result as 'assert ' - self.result = test.result - self.explanation = 'assert ' + test.explanation - if not frame.is_true(test.result): - try: - raise BuiltinAssertionError - except passthroughex: - raise - except: - raise Failure(self) - -class Assign(Interpretable): - __view__ = ast.Assign - - def run(self, frame): - expr = Interpretable(self.expr) - expr.eval(frame) - self.result = expr.result - self.explanation = '... 
= ' + expr.explanation - # fall-back-run the rest of the assignment - ass = ast.Assign(self.nodes, ast.Name('__exprinfo_expr')) - mod = ast.Module(None, ast.Stmt([ass])) - mod.filename = '' - co = pycodegen.ModuleCodeGenerator(mod).getCode() - try: - frame.exec_(co, __exprinfo_expr=expr.result) - except passthroughex: - raise - except: - raise Failure(self) - -class Discard(Interpretable): - __view__ = ast.Discard - - def run(self, frame): - expr = Interpretable(self.expr) - expr.eval(frame) - self.result = expr.result - self.explanation = expr.explanation - -class Stmt(Interpretable): - __view__ = ast.Stmt - - def run(self, frame): - for stmt in self.nodes: - stmt = Interpretable(stmt) - stmt.run(frame) - - -def report_failure(e): - explanation = e.node.nice_explanation() - if explanation: - explanation = ", in: " + explanation - else: - explanation = "" - sys.stdout.write("%s: %s%s\n" % (e.exc.__name__, e.value, explanation)) - -def check(s, frame=None): - if frame is None: - frame = sys._getframe(1) - frame = py.code.Frame(frame) - expr = parse(s, 'eval') - assert isinstance(expr, ast.Expression) - node = Interpretable(expr.node) - try: - node.eval(frame) - except passthroughex: - raise - except Failure: - e = sys.exc_info()[1] - report_failure(e) - else: - if not frame.is_true(node.result): - sys.stderr.write("assertion failed: %s\n" % node.nice_explanation()) - - -########################################################### -# API / Entry points -# ######################################################### - -def interpret(source, frame, should_fail=False): - module = Interpretable(parse(source, 'exec').node) - #print "got module", module - if isinstance(frame, py.std.types.FrameType): - frame = py.code.Frame(frame) - try: - module.run(frame) - except Failure: - e = sys.exc_info()[1] - return getfailure(e) - except passthroughex: - raise - except: - import traceback - traceback.print_exc() - if should_fail: - return ("(assertion failed, but when it was re-run 
for " - "printing intermediate values, it did not fail. Suggestions: " - "compute assert expression before the assert or use --nomagic)") - else: - return None - -def getmsg(excinfo): - if isinstance(excinfo, tuple): - excinfo = py.code.ExceptionInfo(excinfo) - #frame, line = gettbline(tb) - #frame = py.code.Frame(frame) - #return interpret(line, frame) - - tb = excinfo.traceback[-1] - source = str(tb.statement).strip() - x = interpret(source, tb.frame, should_fail=True) - if not isinstance(x, str): - raise TypeError("interpret returned non-string %r" % (x,)) - return x - -def getfailure(e): - explanation = e.node.nice_explanation() - if str(e.value): - lines = explanation.split('\n') - lines[0] += " << %s" % (e.value,) - explanation = '\n'.join(lines) - text = "%s: %s" % (e.exc.__name__, explanation) - if text.startswith('AssertionError: assert '): - text = text[16:] - return text - -def run(s, frame=None): - if frame is None: - frame = sys._getframe(1) - frame = py.code.Frame(frame) - module = Interpretable(parse(s, 'exec').node) - try: - module.run(frame) - except Failure: - e = sys.exc_info()[1] - report_failure(e) - - -if __name__ == '__main__': - # example: - def f(): - return 5 - def g(): - return 3 - def h(x): - return 'never' - check("f() * g() == 5") - check("not f()") - check("not (f() and g() or 0)") - check("f() == g()") - i = 4 - check("i == f()") - check("len(f()) == 0") - check("isinstance(2+3+4, float)") - - run("x = i") - check("x == 5") - - run("assert not f(), 'oops'") - run("a, b, c = 1, 2") - run("a, b, c = f()") - - check("max([f(),g()]) == 4") - check("'hello'[g()] == 'h'") - run("'guk%d' % h(f())") +import py +import sys, inspect +from compiler import parse, ast, pycodegen +from py._code.assertion import BuiltinAssertionError, _format_explanation + +passthroughex = py.builtin._sysex + +class Failure: + def __init__(self, node): + self.exc, self.value, self.tb = sys.exc_info() + self.node = node + +class View(object): + """View base class. 
+ + If C is a subclass of View, then C(x) creates a proxy object around + the object x. The actual class of the proxy is not C in general, + but a *subclass* of C determined by the rules below. To avoid confusion + we call view class the class of the proxy (a subclass of C, so of View) + and object class the class of x. + + Attributes and methods not found in the proxy are automatically read on x. + Other operations like setting attributes are performed on the proxy, as + determined by its view class. The object x is available from the proxy + as its __obj__ attribute. + + The view class selection is determined by the __view__ tuples and the + optional __viewkey__ method. By default, the selected view class is the + most specific subclass of C whose __view__ mentions the class of x. + If no such subclass is found, the search proceeds with the parent + object classes. For example, C(True) will first look for a subclass + of C with __view__ = (..., bool, ...) and only if it doesn't find any + look for one with __view__ = (..., int, ...), and then ..., object,... + If everything fails the class C itself is considered to be the default. + + Alternatively, the view class selection can be driven by another aspect + of the object x, instead of the class of x, by overriding __viewkey__. + See last example at the end of this module. 
+ """ + + _viewcache = {} + __view__ = () + + def __new__(rootclass, obj, *args, **kwds): + self = object.__new__(rootclass) + self.__obj__ = obj + self.__rootclass__ = rootclass + key = self.__viewkey__() + try: + self.__class__ = self._viewcache[key] + except KeyError: + self.__class__ = self._selectsubclass(key) + return self + + def __getattr__(self, attr): + # attributes not found in the normal hierarchy rooted on View + # are looked up in the object's real class + return getattr(self.__obj__, attr) + + def __viewkey__(self): + return self.__obj__.__class__ + + def __matchkey__(self, key, subclasses): + if inspect.isclass(key): + keys = inspect.getmro(key) + else: + keys = [key] + for key in keys: + result = [C for C in subclasses if key in C.__view__] + if result: + return result + return [] + + def _selectsubclass(self, key): + subclasses = list(enumsubclasses(self.__rootclass__)) + for C in subclasses: + if not isinstance(C.__view__, tuple): + C.__view__ = (C.__view__,) + choices = self.__matchkey__(key, subclasses) + if not choices: + return self.__rootclass__ + elif len(choices) == 1: + return choices[0] + else: + # combine the multiple choices + return type('?', tuple(choices), {}) + + def __repr__(self): + return '%s(%r)' % (self.__rootclass__.__name__, self.__obj__) + + +def enumsubclasses(cls): + for subcls in cls.__subclasses__(): + for subsubclass in enumsubclasses(subcls): + yield subsubclass + yield cls + + +class Interpretable(View): + """A parse tree node with a few extra methods.""" + explanation = None + + def is_builtin(self, frame): + return False + + def eval(self, frame): + # fall-back for unknown expression nodes + try: + expr = ast.Expression(self.__obj__) + expr.filename = '' + self.__obj__.filename = '' + co = pycodegen.ExpressionCodeGenerator(expr).getCode() + result = frame.eval(co) + except passthroughex: + raise + except: + raise Failure(self) + self.result = result + self.explanation = self.explanation or frame.repr(self.result) + 
+ def run(self, frame): + # fall-back for unknown statement nodes + try: + expr = ast.Module(None, ast.Stmt([self.__obj__])) + expr.filename = '' + co = pycodegen.ModuleCodeGenerator(expr).getCode() + frame.exec_(co) + except passthroughex: + raise + except: + raise Failure(self) + + def nice_explanation(self): + return _format_explanation(self.explanation) + + +class Name(Interpretable): + __view__ = ast.Name + + def is_local(self, frame): + source = '%r in locals() is not globals()' % self.name + try: + return frame.is_true(frame.eval(source)) + except passthroughex: + raise + except: + return False + + def is_global(self, frame): + source = '%r in globals()' % self.name + try: + return frame.is_true(frame.eval(source)) + except passthroughex: + raise + except: + return False + + def is_builtin(self, frame): + source = '%r not in locals() and %r not in globals()' % ( + self.name, self.name) + try: + return frame.is_true(frame.eval(source)) + except passthroughex: + raise + except: + return False + + def eval(self, frame): + super(Name, self).eval(frame) + if not self.is_local(frame): + self.explanation = self.name + +class Compare(Interpretable): + __view__ = ast.Compare + + def eval(self, frame): + expr = Interpretable(self.expr) + expr.eval(frame) + for operation, expr2 in self.ops: + if hasattr(self, 'result'): + # shortcutting in chained expressions + if not frame.is_true(self.result): + break + expr2 = Interpretable(expr2) + expr2.eval(frame) + self.explanation = "%s %s %s" % ( + expr.explanation, operation, expr2.explanation) + source = "__exprinfo_left %s __exprinfo_right" % operation + try: + self.result = frame.eval(source, + __exprinfo_left=expr.result, + __exprinfo_right=expr2.result) + except passthroughex: + raise + except: + raise Failure(self) + expr = expr2 + +class And(Interpretable): + __view__ = ast.And + + def eval(self, frame): + explanations = [] + for expr in self.nodes: + expr = Interpretable(expr) + expr.eval(frame) + 
explanations.append(expr.explanation) + self.result = expr.result + if not frame.is_true(expr.result): + break + self.explanation = '(' + ' and '.join(explanations) + ')' + +class Or(Interpretable): + __view__ = ast.Or + + def eval(self, frame): + explanations = [] + for expr in self.nodes: + expr = Interpretable(expr) + expr.eval(frame) + explanations.append(expr.explanation) + self.result = expr.result + if frame.is_true(expr.result): + break + self.explanation = '(' + ' or '.join(explanations) + ')' + + +# == Unary operations == +keepalive = [] +for astclass, astpattern in { + ast.Not : 'not __exprinfo_expr', + ast.Invert : '(~__exprinfo_expr)', + }.items(): + + class UnaryArith(Interpretable): + __view__ = astclass + + def eval(self, frame, astpattern=astpattern): + expr = Interpretable(self.expr) + expr.eval(frame) + self.explanation = astpattern.replace('__exprinfo_expr', + expr.explanation) + try: + self.result = frame.eval(astpattern, + __exprinfo_expr=expr.result) + except passthroughex: + raise + except: + raise Failure(self) + + keepalive.append(UnaryArith) + +# == Binary operations == +for astclass, astpattern in { + ast.Add : '(__exprinfo_left + __exprinfo_right)', + ast.Sub : '(__exprinfo_left - __exprinfo_right)', + ast.Mul : '(__exprinfo_left * __exprinfo_right)', + ast.Div : '(__exprinfo_left / __exprinfo_right)', + ast.Mod : '(__exprinfo_left % __exprinfo_right)', + ast.Power : '(__exprinfo_left ** __exprinfo_right)', + }.items(): + + class BinaryArith(Interpretable): + __view__ = astclass + + def eval(self, frame, astpattern=astpattern): + left = Interpretable(self.left) + left.eval(frame) + right = Interpretable(self.right) + right.eval(frame) + self.explanation = (astpattern + .replace('__exprinfo_left', left .explanation) + .replace('__exprinfo_right', right.explanation)) + try: + self.result = frame.eval(astpattern, + __exprinfo_left=left.result, + __exprinfo_right=right.result) + except passthroughex: + raise + except: + raise Failure(self) 
+ + keepalive.append(BinaryArith) + + +class CallFunc(Interpretable): + __view__ = ast.CallFunc + + def is_bool(self, frame): + source = 'isinstance(__exprinfo_value, bool)' + try: + return frame.is_true(frame.eval(source, + __exprinfo_value=self.result)) + except passthroughex: + raise + except: + return False + + def eval(self, frame): + node = Interpretable(self.node) + node.eval(frame) + explanations = [] + vars = {'__exprinfo_fn': node.result} + source = '__exprinfo_fn(' + for a in self.args: + if isinstance(a, ast.Keyword): + keyword = a.name + a = a.expr + else: + keyword = None + a = Interpretable(a) + a.eval(frame) + argname = '__exprinfo_%d' % len(vars) + vars[argname] = a.result + if keyword is None: + source += argname + ',' + explanations.append(a.explanation) + else: + source += '%s=%s,' % (keyword, argname) + explanations.append('%s=%s' % (keyword, a.explanation)) + if self.star_args: + star_args = Interpretable(self.star_args) + star_args.eval(frame) + argname = '__exprinfo_star' + vars[argname] = star_args.result + source += '*' + argname + ',' + explanations.append('*' + star_args.explanation) + if self.dstar_args: + dstar_args = Interpretable(self.dstar_args) + dstar_args.eval(frame) + argname = '__exprinfo_kwds' + vars[argname] = dstar_args.result + source += '**' + argname + ',' + explanations.append('**' + dstar_args.explanation) + self.explanation = "%s(%s)" % ( + node.explanation, ', '.join(explanations)) + if source.endswith(','): + source = source[:-1] + source += ')' + try: + self.result = frame.eval(source, **vars) + except passthroughex: + raise + except: + raise Failure(self) + if not node.is_builtin(frame) or not self.is_bool(frame): + r = frame.repr(self.result) + self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation) + +class Getattr(Interpretable): + __view__ = ast.Getattr + + def eval(self, frame): + expr = Interpretable(self.expr) + expr.eval(frame) + source = '__exprinfo_expr.%s' % self.attrname + try: + self.result = 
frame.eval(source, __exprinfo_expr=expr.result) + except passthroughex: + raise + except: + raise Failure(self) + self.explanation = '%s.%s' % (expr.explanation, self.attrname) + # if the attribute comes from the instance, its value is interesting + source = ('hasattr(__exprinfo_expr, "__dict__") and ' + '%r in __exprinfo_expr.__dict__' % self.attrname) + try: + from_instance = frame.is_true( + frame.eval(source, __exprinfo_expr=expr.result)) + except passthroughex: + raise + except: + from_instance = True + if from_instance: + r = frame.repr(self.result) + self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation) + +# == Re-interpretation of full statements == + +class Assert(Interpretable): + __view__ = ast.Assert + + def run(self, frame): + test = Interpretable(self.test) + test.eval(frame) + # simplify 'assert False where False = ...' + if (test.explanation.startswith('False\n{False = ') and + test.explanation.endswith('\n}')): + test.explanation = test.explanation[15:-2] + # print the result as 'assert ' + self.result = test.result + self.explanation = 'assert ' + test.explanation + if not frame.is_true(test.result): + try: + raise BuiltinAssertionError + except passthroughex: + raise + except: + raise Failure(self) + +class Assign(Interpretable): + __view__ = ast.Assign + + def run(self, frame): + expr = Interpretable(self.expr) + expr.eval(frame) + self.result = expr.result + self.explanation = '... 
= ' + expr.explanation + # fall-back-run the rest of the assignment + ass = ast.Assign(self.nodes, ast.Name('__exprinfo_expr')) + mod = ast.Module(None, ast.Stmt([ass])) + mod.filename = '' + co = pycodegen.ModuleCodeGenerator(mod).getCode() + try: + frame.exec_(co, __exprinfo_expr=expr.result) + except passthroughex: + raise + except: + raise Failure(self) + +class Discard(Interpretable): + __view__ = ast.Discard + + def run(self, frame): + expr = Interpretable(self.expr) + expr.eval(frame) + self.result = expr.result + self.explanation = expr.explanation + +class Stmt(Interpretable): + __view__ = ast.Stmt + + def run(self, frame): + for stmt in self.nodes: + stmt = Interpretable(stmt) + stmt.run(frame) + + +def report_failure(e): + explanation = e.node.nice_explanation() + if explanation: + explanation = ", in: " + explanation + else: + explanation = "" + sys.stdout.write("%s: %s%s\n" % (e.exc.__name__, e.value, explanation)) + +def check(s, frame=None): + if frame is None: + frame = sys._getframe(1) + frame = py.code.Frame(frame) + expr = parse(s, 'eval') + assert isinstance(expr, ast.Expression) + node = Interpretable(expr.node) + try: + node.eval(frame) + except passthroughex: + raise + except Failure: + e = sys.exc_info()[1] + report_failure(e) + else: + if not frame.is_true(node.result): + sys.stderr.write("assertion failed: %s\n" % node.nice_explanation()) + + +########################################################### +# API / Entry points +# ######################################################### + +def interpret(source, frame, should_fail=False): + module = Interpretable(parse(source, 'exec').node) + #print "got module", module + if isinstance(frame, py.std.types.FrameType): + frame = py.code.Frame(frame) + try: + module.run(frame) + except Failure: + e = sys.exc_info()[1] + return getfailure(e) + except passthroughex: + raise + except: + import traceback + traceback.print_exc() + if should_fail: + return ("(assertion failed, but when it was re-run 
for " + "printing intermediate values, it did not fail. Suggestions: " + "compute assert expression before the assert or use --nomagic)") + else: + return None + +def getmsg(excinfo): + if isinstance(excinfo, tuple): + excinfo = py.code.ExceptionInfo(excinfo) + #frame, line = gettbline(tb) + #frame = py.code.Frame(frame) + #return interpret(line, frame) + + tb = excinfo.traceback[-1] + source = str(tb.statement).strip() + x = interpret(source, tb.frame, should_fail=True) + if not isinstance(x, str): + raise TypeError("interpret returned non-string %r" % (x,)) + return x + +def getfailure(e): + explanation = e.node.nice_explanation() + if str(e.value): + lines = explanation.split('\n') + lines[0] += " << %s" % (e.value,) + explanation = '\n'.join(lines) + text = "%s: %s" % (e.exc.__name__, explanation) + if text.startswith('AssertionError: assert '): + text = text[16:] + return text + +def run(s, frame=None): + if frame is None: + frame = sys._getframe(1) + frame = py.code.Frame(frame) + module = Interpretable(parse(s, 'exec').node) + try: + module.run(frame) + except Failure: + e = sys.exc_info()[1] + report_failure(e) + + +if __name__ == '__main__': + # example: + def f(): + return 5 + def g(): + return 3 + def h(x): + return 'never' + check("f() * g() == 5") + check("not f()") + check("not (f() and g() or 0)") + check("f() == g()") + i = 4 + check("i == f()") + check("len(f()) == 0") + check("isinstance(2+3+4, float)") + + run("x = i") + check("x == 5") + + run("assert not f(), 'oops'") + run("a, b, c = 1, 2") + run("a, b, c = f()") + + check("max([f(),g()]) == 4") + check("'hello'[g()] == 'h'") + run("'guk%d' % h(f())") diff --git a/third_party/python/py/py/_code/_py2traceback.py b/third_party/python/py/py/_code/_py2traceback.py index d65e27cb73077..227cb96919e6e 100644 --- a/third_party/python/py/py/_code/_py2traceback.py +++ b/third_party/python/py/py/_code/_py2traceback.py @@ -1,79 +1,79 @@ -# copied from python-2.7.3's traceback.py -# CHANGES: -# - some_str 
is replaced, trying to create unicode strings -# -import types - -def format_exception_only(etype, value): - """Format the exception part of a traceback. - - The arguments are the exception type and value such as given by - sys.last_type and sys.last_value. The return value is a list of - strings, each ending in a newline. - - Normally, the list contains a single string; however, for - SyntaxError exceptions, it contains several lines that (when - printed) display detailed information about where the syntax - error occurred. - - The message indicating which exception occurred is always the last - string in the list. - - """ - - # An instance should not have a meaningful value parameter, but - # sometimes does, particularly for string exceptions, such as - # >>> raise string1, string2 # deprecated - # - # Clear these out first because issubtype(string1, SyntaxError) - # would throw another exception and mask the original problem. - if (isinstance(etype, BaseException) or - isinstance(etype, types.InstanceType) or - etype is None or type(etype) is str): - return [_format_final_exc_line(etype, value)] - - stype = etype.__name__ - - if not issubclass(etype, SyntaxError): - return [_format_final_exc_line(stype, value)] - - # It was a syntax error; show exactly where the problem was found. 
- lines = [] - try: - msg, (filename, lineno, offset, badline) = value.args - except Exception: - pass - else: - filename = filename or "" - lines.append(' File "%s", line %d\n' % (filename, lineno)) - if badline is not None: - lines.append(' %s\n' % badline.strip()) - if offset is not None: - caretspace = badline.rstrip('\n')[:offset].lstrip() - # non-space whitespace (likes tabs) must be kept for alignment - caretspace = ((c.isspace() and c or ' ') for c in caretspace) - # only three spaces to account for offset1 == pos 0 - lines.append(' %s^\n' % ''.join(caretspace)) - value = msg - - lines.append(_format_final_exc_line(stype, value)) - return lines - -def _format_final_exc_line(etype, value): - """Return a list of a single line -- normal case for format_exception_only""" - valuestr = _some_str(value) - if value is None or not valuestr: - line = "%s\n" % etype - else: - line = "%s: %s\n" % (etype, valuestr) - return line - -def _some_str(value): - try: - return unicode(value) - except Exception: - try: - return str(value) - except Exception: - pass - return '' % type(value).__name__ +# copied from python-2.7.3's traceback.py +# CHANGES: +# - some_str is replaced, trying to create unicode strings +# +import types + +def format_exception_only(etype, value): + """Format the exception part of a traceback. + + The arguments are the exception type and value such as given by + sys.last_type and sys.last_value. The return value is a list of + strings, each ending in a newline. + + Normally, the list contains a single string; however, for + SyntaxError exceptions, it contains several lines that (when + printed) display detailed information about where the syntax + error occurred. + + The message indicating which exception occurred is always the last + string in the list. 
+ + """ + + # An instance should not have a meaningful value parameter, but + # sometimes does, particularly for string exceptions, such as + # >>> raise string1, string2 # deprecated + # + # Clear these out first because issubtype(string1, SyntaxError) + # would throw another exception and mask the original problem. + if (isinstance(etype, BaseException) or + isinstance(etype, types.InstanceType) or + etype is None or type(etype) is str): + return [_format_final_exc_line(etype, value)] + + stype = etype.__name__ + + if not issubclass(etype, SyntaxError): + return [_format_final_exc_line(stype, value)] + + # It was a syntax error; show exactly where the problem was found. + lines = [] + try: + msg, (filename, lineno, offset, badline) = value.args + except Exception: + pass + else: + filename = filename or "" + lines.append(' File "%s", line %d\n' % (filename, lineno)) + if badline is not None: + lines.append(' %s\n' % badline.strip()) + if offset is not None: + caretspace = badline.rstrip('\n')[:offset].lstrip() + # non-space whitespace (likes tabs) must be kept for alignment + caretspace = ((c.isspace() and c or ' ') for c in caretspace) + # only three spaces to account for offset1 == pos 0 + lines.append(' %s^\n' % ''.join(caretspace)) + value = msg + + lines.append(_format_final_exc_line(stype, value)) + return lines + +def _format_final_exc_line(etype, value): + """Return a list of a single line -- normal case for format_exception_only""" + valuestr = _some_str(value) + if value is None or not valuestr: + line = "%s\n" % etype + else: + line = "%s: %s\n" % (etype, valuestr) + return line + +def _some_str(value): + try: + return unicode(value) + except Exception: + try: + return str(value) + except Exception: + pass + return '' % type(value).__name__ diff --git a/third_party/python/py/py/_code/assertion.py b/third_party/python/py/py/_code/assertion.py index 4ce80c75b1c0a..3bcb8cd121228 100644 --- a/third_party/python/py/py/_code/assertion.py +++ 
b/third_party/python/py/py/_code/assertion.py @@ -1,94 +1,94 @@ -import sys -import py - -BuiltinAssertionError = py.builtin.builtins.AssertionError - -_reprcompare = None # if set, will be called by assert reinterp for comparison ops - -def _format_explanation(explanation): - """This formats an explanation - - Normally all embedded newlines are escaped, however there are - three exceptions: \n{, \n} and \n~. The first two are intended - cover nested explanations, see function and attribute explanations - for examples (.visit_Call(), visit_Attribute()). The last one is - for when one explanation needs to span multiple lines, e.g. when - displaying diffs. - """ - raw_lines = (explanation or '').split('\n') - # escape newlines not followed by {, } and ~ - lines = [raw_lines[0]] - for l in raw_lines[1:]: - if l.startswith('{') or l.startswith('}') or l.startswith('~'): - lines.append(l) - else: - lines[-1] += '\\n' + l - - result = lines[:1] - stack = [0] - stackcnt = [0] - for line in lines[1:]: - if line.startswith('{'): - if stackcnt[-1]: - s = 'and ' - else: - s = 'where ' - stack.append(len(result)) - stackcnt[-1] += 1 - stackcnt.append(0) - result.append(' +' + ' '*(len(stack)-1) + s + line[1:]) - elif line.startswith('}'): - assert line.startswith('}') - stack.pop() - stackcnt.pop() - result[stack[-1]] += line[1:] - else: - assert line.startswith('~') - result.append(' '*len(stack) + line[1:]) - assert len(stack) == 1 - return '\n'.join(result) - - -class AssertionError(BuiltinAssertionError): - def __init__(self, *args): - BuiltinAssertionError.__init__(self, *args) - if args: - try: - self.msg = str(args[0]) - except py.builtin._sysex: - raise - except: - self.msg = "<[broken __repr__] %s at %0xd>" %( - args[0].__class__, id(args[0])) - else: - f = py.code.Frame(sys._getframe(1)) - try: - source = f.code.fullsource - if source is not None: - try: - source = source.getstatement(f.lineno, assertion=True) - except IndexError: - source = None - else: - source = 
str(source.deindent()).strip() - except py.error.ENOENT: - source = None - # this can also occur during reinterpretation, when the - # co_filename is set to "". - if source: - self.msg = reinterpret(source, f, should_fail=True) - else: - self.msg = "" - if not self.args: - self.args = (self.msg,) - -if sys.version_info > (3, 0): - AssertionError.__module__ = "builtins" - reinterpret_old = "old reinterpretation not available for py3" -else: - from py._code._assertionold import interpret as reinterpret_old -if sys.version_info >= (2, 6) or (sys.platform.startswith("java")): - from py._code._assertionnew import interpret as reinterpret -else: - reinterpret = reinterpret_old - +import sys +import py + +BuiltinAssertionError = py.builtin.builtins.AssertionError + +_reprcompare = None # if set, will be called by assert reinterp for comparison ops + +def _format_explanation(explanation): + """This formats an explanation + + Normally all embedded newlines are escaped, however there are + three exceptions: \n{, \n} and \n~. The first two are intended + cover nested explanations, see function and attribute explanations + for examples (.visit_Call(), visit_Attribute()). The last one is + for when one explanation needs to span multiple lines, e.g. when + displaying diffs. 
+ """ + raw_lines = (explanation or '').split('\n') + # escape newlines not followed by {, } and ~ + lines = [raw_lines[0]] + for l in raw_lines[1:]: + if l.startswith('{') or l.startswith('}') or l.startswith('~'): + lines.append(l) + else: + lines[-1] += '\\n' + l + + result = lines[:1] + stack = [0] + stackcnt = [0] + for line in lines[1:]: + if line.startswith('{'): + if stackcnt[-1]: + s = 'and ' + else: + s = 'where ' + stack.append(len(result)) + stackcnt[-1] += 1 + stackcnt.append(0) + result.append(' +' + ' '*(len(stack)-1) + s + line[1:]) + elif line.startswith('}'): + assert line.startswith('}') + stack.pop() + stackcnt.pop() + result[stack[-1]] += line[1:] + else: + assert line.startswith('~') + result.append(' '*len(stack) + line[1:]) + assert len(stack) == 1 + return '\n'.join(result) + + +class AssertionError(BuiltinAssertionError): + def __init__(self, *args): + BuiltinAssertionError.__init__(self, *args) + if args: + try: + self.msg = str(args[0]) + except py.builtin._sysex: + raise + except: + self.msg = "<[broken __repr__] %s at %0xd>" %( + args[0].__class__, id(args[0])) + else: + f = py.code.Frame(sys._getframe(1)) + try: + source = f.code.fullsource + if source is not None: + try: + source = source.getstatement(f.lineno, assertion=True) + except IndexError: + source = None + else: + source = str(source.deindent()).strip() + except py.error.ENOENT: + source = None + # this can also occur during reinterpretation, when the + # co_filename is set to "". 
+ if source: + self.msg = reinterpret(source, f, should_fail=True) + else: + self.msg = "" + if not self.args: + self.args = (self.msg,) + +if sys.version_info > (3, 0): + AssertionError.__module__ = "builtins" + reinterpret_old = "old reinterpretation not available for py3" +else: + from py._code._assertionold import interpret as reinterpret_old +if sys.version_info >= (2, 6) or (sys.platform.startswith("java")): + from py._code._assertionnew import interpret as reinterpret +else: + reinterpret = reinterpret_old + diff --git a/third_party/python/py/py/_code/code.py b/third_party/python/py/py/_code/code.py index f14c562a2961c..822cf4b7c21fc 100644 --- a/third_party/python/py/py/_code/code.py +++ b/third_party/python/py/py/_code/code.py @@ -1,787 +1,787 @@ -import py -import sys -from inspect import CO_VARARGS, CO_VARKEYWORDS - -builtin_repr = repr - -reprlib = py.builtin._tryimport('repr', 'reprlib') - -if sys.version_info[0] >= 3: - from traceback import format_exception_only -else: - from py._code._py2traceback import format_exception_only - -class Code(object): - """ wrapper around Python code objects """ - def __init__(self, rawcode): - if not hasattr(rawcode, "co_filename"): - rawcode = py.code.getrawcode(rawcode) - try: - self.filename = rawcode.co_filename - self.firstlineno = rawcode.co_firstlineno - 1 - self.name = rawcode.co_name - except AttributeError: - raise TypeError("not a code object: %r" %(rawcode,)) - self.raw = rawcode - - def __eq__(self, other): - return self.raw == other.raw - - def __ne__(self, other): - return not self == other - - @property - def path(self): - """ return a path object pointing to source code (note that it - might not point to an actually existing file). """ - p = py.path.local(self.raw.co_filename) - # maybe don't try this checking - if not p.check(): - # XXX maybe try harder like the weird logic - # in the standard lib [linecache.updatecache] does? 
- p = self.raw.co_filename - return p - - @property - def fullsource(self): - """ return a py.code.Source object for the full source file of the code - """ - from py._code import source - full, _ = source.findsource(self.raw) - return full - - def source(self): - """ return a py.code.Source object for the code object's source only - """ - # return source only for that part of code - return py.code.Source(self.raw) - - def getargs(self, var=False): - """ return a tuple with the argument names for the code object - - if 'var' is set True also return the names of the variable and - keyword arguments when present - """ - # handfull shortcut for getting args - raw = self.raw - argcount = raw.co_argcount - if var: - argcount += raw.co_flags & CO_VARARGS - argcount += raw.co_flags & CO_VARKEYWORDS - return raw.co_varnames[:argcount] - -class Frame(object): - """Wrapper around a Python frame holding f_locals and f_globals - in which expressions can be evaluated.""" - - def __init__(self, frame): - self.lineno = frame.f_lineno - 1 - self.f_globals = frame.f_globals - self.f_locals = frame.f_locals - self.raw = frame - self.code = py.code.Code(frame.f_code) - - @property - def statement(self): - """ statement this frame is at """ - if self.code.fullsource is None: - return py.code.Source("") - return self.code.fullsource.getstatement(self.lineno) - - def eval(self, code, **vars): - """ evaluate 'code' in the frame - - 'vars' are optional additional local variables - - returns the result of the evaluation - """ - f_locals = self.f_locals.copy() - f_locals.update(vars) - return eval(code, self.f_globals, f_locals) - - def exec_(self, code, **vars): - """ exec 'code' in the frame - - 'vars' are optiona; additional local variables - """ - f_locals = self.f_locals.copy() - f_locals.update(vars) - py.builtin.exec_(code, self.f_globals, f_locals ) - - def repr(self, object): - """ return a 'safe' (non-recursive, one-line) string repr for 'object' - """ - return 
py.io.saferepr(object) - - def is_true(self, object): - return object - - def getargs(self, var=False): - """ return a list of tuples (name, value) for all arguments - - if 'var' is set True also include the variable and keyword - arguments when present - """ - retval = [] - for arg in self.code.getargs(var): - try: - retval.append((arg, self.f_locals[arg])) - except KeyError: - pass # this can occur when using Psyco - return retval - -class TracebackEntry(object): - """ a single entry in a traceback """ - - _repr_style = None - exprinfo = None - - def __init__(self, rawentry): - self._rawentry = rawentry - self.lineno = rawentry.tb_lineno - 1 - - def set_repr_style(self, mode): - assert mode in ("short", "long") - self._repr_style = mode - - @property - def frame(self): - return py.code.Frame(self._rawentry.tb_frame) - - @property - def relline(self): - return self.lineno - self.frame.code.firstlineno - - def __repr__(self): - return "" %(self.frame.code.path, self.lineno+1) - - @property - def statement(self): - """ py.code.Source object for the current statement """ - source = self.frame.code.fullsource - return source.getstatement(self.lineno) - - @property - def path(self): - """ path to the source code """ - return self.frame.code.path - - def getlocals(self): - return self.frame.f_locals - locals = property(getlocals, None, None, "locals of underlaying frame") - - def reinterpret(self): - """Reinterpret the failing statement and returns a detailed information - about what operations are performed.""" - if self.exprinfo is None: - source = str(self.statement).strip() - x = py.code._reinterpret(source, self.frame, should_fail=True) - if not isinstance(x, str): - raise TypeError("interpret returned non-string %r" % (x,)) - self.exprinfo = x - return self.exprinfo - - def getfirstlinesource(self): - # on Jython this firstlineno can be -1 apparently - return max(self.frame.code.firstlineno, 0) - - def getsource(self, astcache=None): - """ return failing source 
code. """ - # we use the passed in astcache to not reparse asttrees - # within exception info printing - from py._code.source import getstatementrange_ast - source = self.frame.code.fullsource - if source is None: - return None - key = astnode = None - if astcache is not None: - key = self.frame.code.path - if key is not None: - astnode = astcache.get(key, None) - start = self.getfirstlinesource() - try: - astnode, _, end = getstatementrange_ast(self.lineno, source, - astnode=astnode) - except SyntaxError: - end = self.lineno + 1 - else: - if key is not None: - astcache[key] = astnode - return source[start:end] - - source = property(getsource) - - def ishidden(self): - """ return True if the current frame has a var __tracebackhide__ - resolving to True - - mostly for internal use - """ - try: - return self.frame.f_locals['__tracebackhide__'] - except KeyError: - try: - return self.frame.f_globals['__tracebackhide__'] - except KeyError: - return False - - def __str__(self): - try: - fn = str(self.path) - except py.error.Error: - fn = '???' - name = self.frame.code.name - try: - line = str(self.statement).lstrip() - except KeyboardInterrupt: - raise - except: - line = "???" - return " File %r:%d in %s\n %s\n" %(fn, self.lineno+1, name, line) - - def name(self): - return self.frame.code.raw.co_name - name = property(name, None, None, "co_name of underlaying code") - -class Traceback(list): - """ Traceback objects encapsulate and offer higher level - access to Traceback entries. - """ - Entry = TracebackEntry - def __init__(self, tb): - """ initialize from given python traceback object. 
""" - if hasattr(tb, 'tb_next'): - def f(cur): - while cur is not None: - yield self.Entry(cur) - cur = cur.tb_next - list.__init__(self, f(tb)) - else: - list.__init__(self, tb) - - def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None): - """ return a Traceback instance wrapping part of this Traceback - - by provding any combination of path, lineno and firstlineno, the - first frame to start the to-be-returned traceback is determined - - this allows cutting the first part of a Traceback instance e.g. - for formatting reasons (removing some uninteresting bits that deal - with handling of the exception/traceback) - """ - for x in self: - code = x.frame.code - codepath = code.path - if ((path is None or codepath == path) and - (excludepath is None or not hasattr(codepath, 'relto') or - not codepath.relto(excludepath)) and - (lineno is None or x.lineno == lineno) and - (firstlineno is None or x.frame.code.firstlineno == firstlineno)): - return Traceback(x._rawentry) - return self - - def __getitem__(self, key): - val = super(Traceback, self).__getitem__(key) - if isinstance(key, type(slice(0))): - val = self.__class__(val) - return val - - def filter(self, fn=lambda x: not x.ishidden()): - """ return a Traceback instance with certain items removed - - fn is a function that gets a single argument, a TracebackItem - instance, and should return True when the item should be added - to the Traceback, False when not - - by default this removes all the TracebackItems which are hidden - (see ishidden() above) - """ - return Traceback(filter(fn, self)) - - def getcrashentry(self): - """ return last non-hidden traceback entry that lead - to the exception of a traceback. 
- """ - for i in range(-1, -len(self)-1, -1): - entry = self[i] - if not entry.ishidden(): - return entry - return self[-1] - - def recursionindex(self): - """ return the index of the frame/TracebackItem where recursion - originates if appropriate, None if no recursion occurred - """ - cache = {} - for i, entry in enumerate(self): - # id for the code.raw is needed to work around - # the strange metaprogramming in the decorator lib from pypi - # which generates code objects that have hash/value equality - #XXX needs a test - key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno - #print "checking for recursion at", key - l = cache.setdefault(key, []) - if l: - f = entry.frame - loc = f.f_locals - for otherloc in l: - if f.is_true(f.eval(co_equal, - __recursioncache_locals_1=loc, - __recursioncache_locals_2=otherloc)): - return i - l.append(entry.frame.f_locals) - return None - -co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2', - '?', 'eval') - -class ExceptionInfo(object): - """ wraps sys.exc_info() objects and offers - help for navigating the traceback. 
- """ - _striptext = '' - def __init__(self, tup=None, exprinfo=None): - if tup is None: - tup = sys.exc_info() - if exprinfo is None and isinstance(tup[1], AssertionError): - exprinfo = getattr(tup[1], 'msg', None) - if exprinfo is None: - exprinfo = str(tup[1]) - if exprinfo and exprinfo.startswith('assert '): - self._striptext = 'AssertionError: ' - self._excinfo = tup - #: the exception class - self.type = tup[0] - #: the exception instance - self.value = tup[1] - #: the exception raw traceback - self.tb = tup[2] - #: the exception type name - self.typename = self.type.__name__ - #: the exception traceback (py.code.Traceback instance) - self.traceback = py.code.Traceback(self.tb) - - def __repr__(self): - return "" % (self.typename, len(self.traceback)) - - def exconly(self, tryshort=False): - """ return the exception as a string - - when 'tryshort' resolves to True, and the exception is a - py.code._AssertionError, only the actual exception part of - the exception representation is returned (so 'AssertionError: ' is - removed from the beginning) - """ - lines = format_exception_only(self.type, self.value) - text = ''.join(lines) - text = text.rstrip() - if tryshort: - if text.startswith(self._striptext): - text = text[len(self._striptext):] - return text - - def errisinstance(self, exc): - """ return True if the exception is an instance of exc """ - return isinstance(self.value, exc) - - def _getreprcrash(self): - exconly = self.exconly(tryshort=True) - entry = self.traceback.getcrashentry() - path, lineno = entry.frame.code.raw.co_filename, entry.lineno - return ReprFileLocation(path, lineno+1, exconly) - - def getrepr(self, showlocals=False, style="long", - abspath=False, tbfilter=True, funcargs=False): - """ return str()able representation of this exception info. 
- showlocals: show locals per traceback entry - style: long|short|no|native traceback style - tbfilter: hide entries (where __tracebackhide__ is true) - - in case of style==native, tbfilter and showlocals is ignored. - """ - if style == 'native': - return ReprExceptionInfo(ReprTracebackNative( - py.std.traceback.format_exception( - self.type, - self.value, - self.traceback[0]._rawentry, - )), self._getreprcrash()) - - fmt = FormattedExcinfo(showlocals=showlocals, style=style, - abspath=abspath, tbfilter=tbfilter, funcargs=funcargs) - return fmt.repr_excinfo(self) - - def __str__(self): - entry = self.traceback[-1] - loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly()) - return str(loc) - - def __unicode__(self): - entry = self.traceback[-1] - loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly()) - return unicode(loc) - - -class FormattedExcinfo(object): - """ presenting information about failing Functions and Generators. """ - # for traceback entries - flow_marker = ">" - fail_marker = "E" - - def __init__(self, showlocals=False, style="long", abspath=True, tbfilter=True, funcargs=False): - self.showlocals = showlocals - self.style = style - self.tbfilter = tbfilter - self.funcargs = funcargs - self.abspath = abspath - self.astcache = {} - - def _getindent(self, source): - # figure out indent for given source - try: - s = str(source.getstatement(len(source)-1)) - except KeyboardInterrupt: - raise - except: - try: - s = str(source[-1]) - except KeyboardInterrupt: - raise - except: - return 0 - return 4 + (len(s) - len(s.lstrip())) - - def _getentrysource(self, entry): - source = entry.getsource(self.astcache) - if source is not None: - source = source.deindent() - return source - - def _saferepr(self, obj): - return py.io.saferepr(obj) - - def repr_args(self, entry): - if self.funcargs: - args = [] - for argname, argvalue in entry.frame.getargs(var=True): - args.append((argname, self._saferepr(argvalue))) - return ReprFuncArgs(args) - 
- def get_source(self, source, line_index=-1, excinfo=None, short=False): - """ return formatted and marked up source lines. """ - lines = [] - if source is None or line_index >= len(source.lines): - source = py.code.Source("???") - line_index = 0 - if line_index < 0: - line_index += len(source) - space_prefix = " " - if short: - lines.append(space_prefix + source.lines[line_index].strip()) - else: - for line in source.lines[:line_index]: - lines.append(space_prefix + line) - lines.append(self.flow_marker + " " + source.lines[line_index]) - for line in source.lines[line_index+1:]: - lines.append(space_prefix + line) - if excinfo is not None: - indent = 4 if short else self._getindent(source) - lines.extend(self.get_exconly(excinfo, indent=indent, markall=True)) - return lines - - def get_exconly(self, excinfo, indent=4, markall=False): - lines = [] - indent = " " * indent - # get the real exception information out - exlines = excinfo.exconly(tryshort=True).split('\n') - failindent = self.fail_marker + indent[1:] - for line in exlines: - lines.append(failindent + line) - if not markall: - failindent = indent - return lines - - def repr_locals(self, locals): - if self.showlocals: - lines = [] - keys = [loc for loc in locals if loc[0] != "@"] - keys.sort() - for name in keys: - value = locals[name] - if name == '__builtins__': - lines.append("__builtins__ = ") - else: - # This formatting could all be handled by the - # _repr() function, which is only reprlib.Repr in - # disguise, so is very configurable. 
- str_repr = self._saferepr(value) - #if len(str_repr) < 70 or not isinstance(value, - # (list, tuple, dict)): - lines.append("%-10s = %s" %(name, str_repr)) - #else: - # self._line("%-10s =\\" % (name,)) - # # XXX - # py.std.pprint.pprint(value, stream=self.excinfowriter) - return ReprLocals(lines) - - def repr_traceback_entry(self, entry, excinfo=None): - source = self._getentrysource(entry) - if source is None: - source = py.code.Source("???") - line_index = 0 - else: - # entry.getfirstlinesource() can be -1, should be 0 on jython - line_index = entry.lineno - max(entry.getfirstlinesource(), 0) - - lines = [] - style = entry._repr_style - if style is None: - style = self.style - if style in ("short", "long"): - short = style == "short" - reprargs = self.repr_args(entry) if not short else None - s = self.get_source(source, line_index, excinfo, short=short) - lines.extend(s) - if short: - message = "in %s" %(entry.name) - else: - message = excinfo and excinfo.typename or "" - path = self._makepath(entry.path) - filelocrepr = ReprFileLocation(path, entry.lineno+1, message) - localsrepr = None - if not short: - localsrepr = self.repr_locals(entry.locals) - return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style) - if excinfo: - lines.extend(self.get_exconly(excinfo, indent=4)) - return ReprEntry(lines, None, None, None, style) - - def _makepath(self, path): - if not self.abspath: - try: - np = py.path.local().bestrelpath(path) - except OSError: - return path - if len(np) < len(str(path)): - path = np - return path - - def repr_traceback(self, excinfo): - traceback = excinfo.traceback - if self.tbfilter: - traceback = traceback.filter() - recursionindex = None - if excinfo.errisinstance(RuntimeError): - if "maximum recursion depth exceeded" in str(excinfo.value): - recursionindex = traceback.recursionindex() - last = traceback[-1] - entries = [] - extraline = None - for index, entry in enumerate(traceback): - einfo = (last == entry) and excinfo or None - 
reprentry = self.repr_traceback_entry(entry, einfo) - entries.append(reprentry) - if index == recursionindex: - extraline = "!!! Recursion detected (same locals & position)" - break - return ReprTraceback(entries, extraline, style=self.style) - - def repr_excinfo(self, excinfo): - reprtraceback = self.repr_traceback(excinfo) - reprcrash = excinfo._getreprcrash() - return ReprExceptionInfo(reprtraceback, reprcrash) - -class TerminalRepr: - def __str__(self): - s = self.__unicode__() - if sys.version_info[0] < 3: - s = s.encode('utf-8') - return s - - def __unicode__(self): - # FYI this is called from pytest-xdist's serialization of exception - # information. - io = py.io.TextIO() - tw = py.io.TerminalWriter(file=io) - self.toterminal(tw) - return io.getvalue().strip() - - def __repr__(self): - return "<%s instance at %0x>" %(self.__class__, id(self)) - - -class ReprExceptionInfo(TerminalRepr): - def __init__(self, reprtraceback, reprcrash): - self.reprtraceback = reprtraceback - self.reprcrash = reprcrash - self.sections = [] - - def addsection(self, name, content, sep="-"): - self.sections.append((name, content, sep)) - - def toterminal(self, tw): - self.reprtraceback.toterminal(tw) - for name, content, sep in self.sections: - tw.sep(sep, name) - tw.line(content) - -class ReprTraceback(TerminalRepr): - entrysep = "_ " - - def __init__(self, reprentries, extraline, style): - self.reprentries = reprentries - self.extraline = extraline - self.style = style - - def toterminal(self, tw): - # the entries might have different styles - last_style = None - for i, entry in enumerate(self.reprentries): - if entry.style == "long": - tw.line("") - entry.toterminal(tw) - if i < len(self.reprentries) - 1: - next_entry = self.reprentries[i+1] - if entry.style == "long" or \ - entry.style == "short" and next_entry.style == "long": - tw.sep(self.entrysep) - - if self.extraline: - tw.line(self.extraline) - -class ReprTracebackNative(ReprTraceback): - def __init__(self, tblines): - 
self.style = "native" - self.reprentries = [ReprEntryNative(tblines)] - self.extraline = None - -class ReprEntryNative(TerminalRepr): - style = "native" - - def __init__(self, tblines): - self.lines = tblines - - def toterminal(self, tw): - tw.write("".join(self.lines)) - -class ReprEntry(TerminalRepr): - localssep = "_ " - - def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style): - self.lines = lines - self.reprfuncargs = reprfuncargs - self.reprlocals = reprlocals - self.reprfileloc = filelocrepr - self.style = style - - def toterminal(self, tw): - if self.style == "short": - self.reprfileloc.toterminal(tw) - for line in self.lines: - red = line.startswith("E ") - tw.line(line, bold=True, red=red) - #tw.line("") - return - if self.reprfuncargs: - self.reprfuncargs.toterminal(tw) - for line in self.lines: - red = line.startswith("E ") - tw.line(line, bold=True, red=red) - if self.reprlocals: - #tw.sep(self.localssep, "Locals") - tw.line("") - self.reprlocals.toterminal(tw) - if self.reprfileloc: - if self.lines: - tw.line("") - self.reprfileloc.toterminal(tw) - - def __str__(self): - return "%s\n%s\n%s" % ("\n".join(self.lines), - self.reprlocals, - self.reprfileloc) - -class ReprFileLocation(TerminalRepr): - def __init__(self, path, lineno, message): - self.path = str(path) - self.lineno = lineno - self.message = message - - def toterminal(self, tw): - # filename and lineno output for each entry, - # using an output format that most editors unterstand - msg = self.message - i = msg.find("\n") - if i != -1: - msg = msg[:i] - tw.line("%s:%s: %s" %(self.path, self.lineno, msg)) - -class ReprLocals(TerminalRepr): - def __init__(self, lines): - self.lines = lines - - def toterminal(self, tw): - for line in self.lines: - tw.line(line) - -class ReprFuncArgs(TerminalRepr): - def __init__(self, args): - self.args = args - - def toterminal(self, tw): - if self.args: - linesofar = "" - for name, value in self.args: - ns = "%s = %s" %(name, value) - if 
len(ns) + len(linesofar) + 2 > tw.fullwidth: - if linesofar: - tw.line(linesofar) - linesofar = ns - else: - if linesofar: - linesofar += ", " + ns - else: - linesofar = ns - if linesofar: - tw.line(linesofar) - tw.line("") - - - -oldbuiltins = {} - -def patch_builtins(assertion=True, compile=True): - """ put compile and AssertionError builtins to Python's builtins. """ - if assertion: - from py._code import assertion - l = oldbuiltins.setdefault('AssertionError', []) - l.append(py.builtin.builtins.AssertionError) - py.builtin.builtins.AssertionError = assertion.AssertionError - if compile: - l = oldbuiltins.setdefault('compile', []) - l.append(py.builtin.builtins.compile) - py.builtin.builtins.compile = py.code.compile - -def unpatch_builtins(assertion=True, compile=True): - """ remove compile and AssertionError builtins from Python builtins. """ - if assertion: - py.builtin.builtins.AssertionError = oldbuiltins['AssertionError'].pop() - if compile: - py.builtin.builtins.compile = oldbuiltins['compile'].pop() - -def getrawcode(obj, trycall=True): - """ return code object for given function. 
""" - try: - return obj.__code__ - except AttributeError: - obj = getattr(obj, 'im_func', obj) - obj = getattr(obj, 'func_code', obj) - obj = getattr(obj, 'f_code', obj) - obj = getattr(obj, '__code__', obj) - if trycall and not hasattr(obj, 'co_firstlineno'): - if hasattr(obj, '__call__') and not py.std.inspect.isclass(obj): - x = getrawcode(obj.__call__, trycall=False) - if hasattr(x, 'co_firstlineno'): - return x - return obj - +import py +import sys +from inspect import CO_VARARGS, CO_VARKEYWORDS + +builtin_repr = repr + +reprlib = py.builtin._tryimport('repr', 'reprlib') + +if sys.version_info[0] >= 3: + from traceback import format_exception_only +else: + from py._code._py2traceback import format_exception_only + +class Code(object): + """ wrapper around Python code objects """ + def __init__(self, rawcode): + if not hasattr(rawcode, "co_filename"): + rawcode = py.code.getrawcode(rawcode) + try: + self.filename = rawcode.co_filename + self.firstlineno = rawcode.co_firstlineno - 1 + self.name = rawcode.co_name + except AttributeError: + raise TypeError("not a code object: %r" %(rawcode,)) + self.raw = rawcode + + def __eq__(self, other): + return self.raw == other.raw + + def __ne__(self, other): + return not self == other + + @property + def path(self): + """ return a path object pointing to source code (note that it + might not point to an actually existing file). """ + p = py.path.local(self.raw.co_filename) + # maybe don't try this checking + if not p.check(): + # XXX maybe try harder like the weird logic + # in the standard lib [linecache.updatecache] does? 
+ p = self.raw.co_filename + return p + + @property + def fullsource(self): + """ return a py.code.Source object for the full source file of the code + """ + from py._code import source + full, _ = source.findsource(self.raw) + return full + + def source(self): + """ return a py.code.Source object for the code object's source only + """ + # return source only for that part of code + return py.code.Source(self.raw) + + def getargs(self, var=False): + """ return a tuple with the argument names for the code object + + if 'var' is set True also return the names of the variable and + keyword arguments when present + """ + # handfull shortcut for getting args + raw = self.raw + argcount = raw.co_argcount + if var: + argcount += raw.co_flags & CO_VARARGS + argcount += raw.co_flags & CO_VARKEYWORDS + return raw.co_varnames[:argcount] + +class Frame(object): + """Wrapper around a Python frame holding f_locals and f_globals + in which expressions can be evaluated.""" + + def __init__(self, frame): + self.lineno = frame.f_lineno - 1 + self.f_globals = frame.f_globals + self.f_locals = frame.f_locals + self.raw = frame + self.code = py.code.Code(frame.f_code) + + @property + def statement(self): + """ statement this frame is at """ + if self.code.fullsource is None: + return py.code.Source("") + return self.code.fullsource.getstatement(self.lineno) + + def eval(self, code, **vars): + """ evaluate 'code' in the frame + + 'vars' are optional additional local variables + + returns the result of the evaluation + """ + f_locals = self.f_locals.copy() + f_locals.update(vars) + return eval(code, self.f_globals, f_locals) + + def exec_(self, code, **vars): + """ exec 'code' in the frame + + 'vars' are optiona; additional local variables + """ + f_locals = self.f_locals.copy() + f_locals.update(vars) + py.builtin.exec_(code, self.f_globals, f_locals ) + + def repr(self, object): + """ return a 'safe' (non-recursive, one-line) string repr for 'object' + """ + return 
py.io.saferepr(object) + + def is_true(self, object): + return object + + def getargs(self, var=False): + """ return a list of tuples (name, value) for all arguments + + if 'var' is set True also include the variable and keyword + arguments when present + """ + retval = [] + for arg in self.code.getargs(var): + try: + retval.append((arg, self.f_locals[arg])) + except KeyError: + pass # this can occur when using Psyco + return retval + +class TracebackEntry(object): + """ a single entry in a traceback """ + + _repr_style = None + exprinfo = None + + def __init__(self, rawentry): + self._rawentry = rawentry + self.lineno = rawentry.tb_lineno - 1 + + def set_repr_style(self, mode): + assert mode in ("short", "long") + self._repr_style = mode + + @property + def frame(self): + return py.code.Frame(self._rawentry.tb_frame) + + @property + def relline(self): + return self.lineno - self.frame.code.firstlineno + + def __repr__(self): + return "" %(self.frame.code.path, self.lineno+1) + + @property + def statement(self): + """ py.code.Source object for the current statement """ + source = self.frame.code.fullsource + return source.getstatement(self.lineno) + + @property + def path(self): + """ path to the source code """ + return self.frame.code.path + + def getlocals(self): + return self.frame.f_locals + locals = property(getlocals, None, None, "locals of underlaying frame") + + def reinterpret(self): + """Reinterpret the failing statement and returns a detailed information + about what operations are performed.""" + if self.exprinfo is None: + source = str(self.statement).strip() + x = py.code._reinterpret(source, self.frame, should_fail=True) + if not isinstance(x, str): + raise TypeError("interpret returned non-string %r" % (x,)) + self.exprinfo = x + return self.exprinfo + + def getfirstlinesource(self): + # on Jython this firstlineno can be -1 apparently + return max(self.frame.code.firstlineno, 0) + + def getsource(self, astcache=None): + """ return failing source 
code. """ + # we use the passed in astcache to not reparse asttrees + # within exception info printing + from py._code.source import getstatementrange_ast + source = self.frame.code.fullsource + if source is None: + return None + key = astnode = None + if astcache is not None: + key = self.frame.code.path + if key is not None: + astnode = astcache.get(key, None) + start = self.getfirstlinesource() + try: + astnode, _, end = getstatementrange_ast(self.lineno, source, + astnode=astnode) + except SyntaxError: + end = self.lineno + 1 + else: + if key is not None: + astcache[key] = astnode + return source[start:end] + + source = property(getsource) + + def ishidden(self): + """ return True if the current frame has a var __tracebackhide__ + resolving to True + + mostly for internal use + """ + try: + return self.frame.f_locals['__tracebackhide__'] + except KeyError: + try: + return self.frame.f_globals['__tracebackhide__'] + except KeyError: + return False + + def __str__(self): + try: + fn = str(self.path) + except py.error.Error: + fn = '???' + name = self.frame.code.name + try: + line = str(self.statement).lstrip() + except KeyboardInterrupt: + raise + except: + line = "???" + return " File %r:%d in %s\n %s\n" %(fn, self.lineno+1, name, line) + + def name(self): + return self.frame.code.raw.co_name + name = property(name, None, None, "co_name of underlaying code") + +class Traceback(list): + """ Traceback objects encapsulate and offer higher level + access to Traceback entries. + """ + Entry = TracebackEntry + def __init__(self, tb): + """ initialize from given python traceback object. 
""" + if hasattr(tb, 'tb_next'): + def f(cur): + while cur is not None: + yield self.Entry(cur) + cur = cur.tb_next + list.__init__(self, f(tb)) + else: + list.__init__(self, tb) + + def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None): + """ return a Traceback instance wrapping part of this Traceback + + by provding any combination of path, lineno and firstlineno, the + first frame to start the to-be-returned traceback is determined + + this allows cutting the first part of a Traceback instance e.g. + for formatting reasons (removing some uninteresting bits that deal + with handling of the exception/traceback) + """ + for x in self: + code = x.frame.code + codepath = code.path + if ((path is None or codepath == path) and + (excludepath is None or not hasattr(codepath, 'relto') or + not codepath.relto(excludepath)) and + (lineno is None or x.lineno == lineno) and + (firstlineno is None or x.frame.code.firstlineno == firstlineno)): + return Traceback(x._rawentry) + return self + + def __getitem__(self, key): + val = super(Traceback, self).__getitem__(key) + if isinstance(key, type(slice(0))): + val = self.__class__(val) + return val + + def filter(self, fn=lambda x: not x.ishidden()): + """ return a Traceback instance with certain items removed + + fn is a function that gets a single argument, a TracebackItem + instance, and should return True when the item should be added + to the Traceback, False when not + + by default this removes all the TracebackItems which are hidden + (see ishidden() above) + """ + return Traceback(filter(fn, self)) + + def getcrashentry(self): + """ return last non-hidden traceback entry that lead + to the exception of a traceback. 
+ """ + for i in range(-1, -len(self)-1, -1): + entry = self[i] + if not entry.ishidden(): + return entry + return self[-1] + + def recursionindex(self): + """ return the index of the frame/TracebackItem where recursion + originates if appropriate, None if no recursion occurred + """ + cache = {} + for i, entry in enumerate(self): + # id for the code.raw is needed to work around + # the strange metaprogramming in the decorator lib from pypi + # which generates code objects that have hash/value equality + #XXX needs a test + key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno + #print "checking for recursion at", key + l = cache.setdefault(key, []) + if l: + f = entry.frame + loc = f.f_locals + for otherloc in l: + if f.is_true(f.eval(co_equal, + __recursioncache_locals_1=loc, + __recursioncache_locals_2=otherloc)): + return i + l.append(entry.frame.f_locals) + return None + +co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2', + '?', 'eval') + +class ExceptionInfo(object): + """ wraps sys.exc_info() objects and offers + help for navigating the traceback. 
+ """ + _striptext = '' + def __init__(self, tup=None, exprinfo=None): + if tup is None: + tup = sys.exc_info() + if exprinfo is None and isinstance(tup[1], AssertionError): + exprinfo = getattr(tup[1], 'msg', None) + if exprinfo is None: + exprinfo = str(tup[1]) + if exprinfo and exprinfo.startswith('assert '): + self._striptext = 'AssertionError: ' + self._excinfo = tup + #: the exception class + self.type = tup[0] + #: the exception instance + self.value = tup[1] + #: the exception raw traceback + self.tb = tup[2] + #: the exception type name + self.typename = self.type.__name__ + #: the exception traceback (py.code.Traceback instance) + self.traceback = py.code.Traceback(self.tb) + + def __repr__(self): + return "" % (self.typename, len(self.traceback)) + + def exconly(self, tryshort=False): + """ return the exception as a string + + when 'tryshort' resolves to True, and the exception is a + py.code._AssertionError, only the actual exception part of + the exception representation is returned (so 'AssertionError: ' is + removed from the beginning) + """ + lines = format_exception_only(self.type, self.value) + text = ''.join(lines) + text = text.rstrip() + if tryshort: + if text.startswith(self._striptext): + text = text[len(self._striptext):] + return text + + def errisinstance(self, exc): + """ return True if the exception is an instance of exc """ + return isinstance(self.value, exc) + + def _getreprcrash(self): + exconly = self.exconly(tryshort=True) + entry = self.traceback.getcrashentry() + path, lineno = entry.frame.code.raw.co_filename, entry.lineno + return ReprFileLocation(path, lineno+1, exconly) + + def getrepr(self, showlocals=False, style="long", + abspath=False, tbfilter=True, funcargs=False): + """ return str()able representation of this exception info. 
+ showlocals: show locals per traceback entry + style: long|short|no|native traceback style + tbfilter: hide entries (where __tracebackhide__ is true) + + in case of style==native, tbfilter and showlocals is ignored. + """ + if style == 'native': + return ReprExceptionInfo(ReprTracebackNative( + py.std.traceback.format_exception( + self.type, + self.value, + self.traceback[0]._rawentry, + )), self._getreprcrash()) + + fmt = FormattedExcinfo(showlocals=showlocals, style=style, + abspath=abspath, tbfilter=tbfilter, funcargs=funcargs) + return fmt.repr_excinfo(self) + + def __str__(self): + entry = self.traceback[-1] + loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly()) + return str(loc) + + def __unicode__(self): + entry = self.traceback[-1] + loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly()) + return loc.__unicode__() + + +class FormattedExcinfo(object): + """ presenting information about failing Functions and Generators. """ + # for traceback entries + flow_marker = ">" + fail_marker = "E" + + def __init__(self, showlocals=False, style="long", abspath=True, tbfilter=True, funcargs=False): + self.showlocals = showlocals + self.style = style + self.tbfilter = tbfilter + self.funcargs = funcargs + self.abspath = abspath + self.astcache = {} + + def _getindent(self, source): + # figure out indent for given source + try: + s = str(source.getstatement(len(source)-1)) + except KeyboardInterrupt: + raise + except: + try: + s = str(source[-1]) + except KeyboardInterrupt: + raise + except: + return 0 + return 4 + (len(s) - len(s.lstrip())) + + def _getentrysource(self, entry): + source = entry.getsource(self.astcache) + if source is not None: + source = source.deindent() + return source + + def _saferepr(self, obj): + return py.io.saferepr(obj) + + def repr_args(self, entry): + if self.funcargs: + args = [] + for argname, argvalue in entry.frame.getargs(var=True): + args.append((argname, self._saferepr(argvalue))) + return 
ReprFuncArgs(args) + + def get_source(self, source, line_index=-1, excinfo=None, short=False): + """ return formatted and marked up source lines. """ + lines = [] + if source is None or line_index >= len(source.lines): + source = py.code.Source("???") + line_index = 0 + if line_index < 0: + line_index += len(source) + space_prefix = " " + if short: + lines.append(space_prefix + source.lines[line_index].strip()) + else: + for line in source.lines[:line_index]: + lines.append(space_prefix + line) + lines.append(self.flow_marker + " " + source.lines[line_index]) + for line in source.lines[line_index+1:]: + lines.append(space_prefix + line) + if excinfo is not None: + indent = 4 if short else self._getindent(source) + lines.extend(self.get_exconly(excinfo, indent=indent, markall=True)) + return lines + + def get_exconly(self, excinfo, indent=4, markall=False): + lines = [] + indent = " " * indent + # get the real exception information out + exlines = excinfo.exconly(tryshort=True).split('\n') + failindent = self.fail_marker + indent[1:] + for line in exlines: + lines.append(failindent + line) + if not markall: + failindent = indent + return lines + + def repr_locals(self, locals): + if self.showlocals: + lines = [] + keys = [loc for loc in locals if loc[0] != "@"] + keys.sort() + for name in keys: + value = locals[name] + if name == '__builtins__': + lines.append("__builtins__ = ") + else: + # This formatting could all be handled by the + # _repr() function, which is only reprlib.Repr in + # disguise, so is very configurable. 
+ str_repr = self._saferepr(value) + #if len(str_repr) < 70 or not isinstance(value, + # (list, tuple, dict)): + lines.append("%-10s = %s" %(name, str_repr)) + #else: + # self._line("%-10s =\\" % (name,)) + # # XXX + # py.std.pprint.pprint(value, stream=self.excinfowriter) + return ReprLocals(lines) + + def repr_traceback_entry(self, entry, excinfo=None): + source = self._getentrysource(entry) + if source is None: + source = py.code.Source("???") + line_index = 0 + else: + # entry.getfirstlinesource() can be -1, should be 0 on jython + line_index = entry.lineno - max(entry.getfirstlinesource(), 0) + + lines = [] + style = entry._repr_style + if style is None: + style = self.style + if style in ("short", "long"): + short = style == "short" + reprargs = self.repr_args(entry) if not short else None + s = self.get_source(source, line_index, excinfo, short=short) + lines.extend(s) + if short: + message = "in %s" %(entry.name) + else: + message = excinfo and excinfo.typename or "" + path = self._makepath(entry.path) + filelocrepr = ReprFileLocation(path, entry.lineno+1, message) + localsrepr = None + if not short: + localsrepr = self.repr_locals(entry.locals) + return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style) + if excinfo: + lines.extend(self.get_exconly(excinfo, indent=4)) + return ReprEntry(lines, None, None, None, style) + + def _makepath(self, path): + if not self.abspath: + try: + np = py.path.local().bestrelpath(path) + except OSError: + return path + if len(np) < len(str(path)): + path = np + return path + + def repr_traceback(self, excinfo): + traceback = excinfo.traceback + if self.tbfilter: + traceback = traceback.filter() + recursionindex = None + if excinfo.errisinstance(RuntimeError): + if "maximum recursion depth exceeded" in str(excinfo.value): + recursionindex = traceback.recursionindex() + last = traceback[-1] + entries = [] + extraline = None + for index, entry in enumerate(traceback): + einfo = (last == entry) and excinfo or None + 
reprentry = self.repr_traceback_entry(entry, einfo) + entries.append(reprentry) + if index == recursionindex: + extraline = "!!! Recursion detected (same locals & position)" + break + return ReprTraceback(entries, extraline, style=self.style) + + def repr_excinfo(self, excinfo): + reprtraceback = self.repr_traceback(excinfo) + reprcrash = excinfo._getreprcrash() + return ReprExceptionInfo(reprtraceback, reprcrash) + +class TerminalRepr: + def __str__(self): + s = self.__unicode__() + if sys.version_info[0] < 3: + s = s.encode('utf-8') + return s + + def __unicode__(self): + # FYI this is called from pytest-xdist's serialization of exception + # information. + io = py.io.TextIO() + tw = py.io.TerminalWriter(file=io) + self.toterminal(tw) + return io.getvalue().strip() + + def __repr__(self): + return "<%s instance at %0x>" %(self.__class__, id(self)) + + +class ReprExceptionInfo(TerminalRepr): + def __init__(self, reprtraceback, reprcrash): + self.reprtraceback = reprtraceback + self.reprcrash = reprcrash + self.sections = [] + + def addsection(self, name, content, sep="-"): + self.sections.append((name, content, sep)) + + def toterminal(self, tw): + self.reprtraceback.toterminal(tw) + for name, content, sep in self.sections: + tw.sep(sep, name) + tw.line(content) + +class ReprTraceback(TerminalRepr): + entrysep = "_ " + + def __init__(self, reprentries, extraline, style): + self.reprentries = reprentries + self.extraline = extraline + self.style = style + + def toterminal(self, tw): + # the entries might have different styles + last_style = None + for i, entry in enumerate(self.reprentries): + if entry.style == "long": + tw.line("") + entry.toterminal(tw) + if i < len(self.reprentries) - 1: + next_entry = self.reprentries[i+1] + if entry.style == "long" or \ + entry.style == "short" and next_entry.style == "long": + tw.sep(self.entrysep) + + if self.extraline: + tw.line(self.extraline) + +class ReprTracebackNative(ReprTraceback): + def __init__(self, tblines): + 
self.style = "native" + self.reprentries = [ReprEntryNative(tblines)] + self.extraline = None + +class ReprEntryNative(TerminalRepr): + style = "native" + + def __init__(self, tblines): + self.lines = tblines + + def toterminal(self, tw): + tw.write("".join(self.lines)) + +class ReprEntry(TerminalRepr): + localssep = "_ " + + def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style): + self.lines = lines + self.reprfuncargs = reprfuncargs + self.reprlocals = reprlocals + self.reprfileloc = filelocrepr + self.style = style + + def toterminal(self, tw): + if self.style == "short": + self.reprfileloc.toterminal(tw) + for line in self.lines: + red = line.startswith("E ") + tw.line(line, bold=True, red=red) + #tw.line("") + return + if self.reprfuncargs: + self.reprfuncargs.toterminal(tw) + for line in self.lines: + red = line.startswith("E ") + tw.line(line, bold=True, red=red) + if self.reprlocals: + #tw.sep(self.localssep, "Locals") + tw.line("") + self.reprlocals.toterminal(tw) + if self.reprfileloc: + if self.lines: + tw.line("") + self.reprfileloc.toterminal(tw) + + def __str__(self): + return "%s\n%s\n%s" % ("\n".join(self.lines), + self.reprlocals, + self.reprfileloc) + +class ReprFileLocation(TerminalRepr): + def __init__(self, path, lineno, message): + self.path = str(path) + self.lineno = lineno + self.message = message + + def toterminal(self, tw): + # filename and lineno output for each entry, + # using an output format that most editors unterstand + msg = self.message + i = msg.find("\n") + if i != -1: + msg = msg[:i] + tw.line("%s:%s: %s" %(self.path, self.lineno, msg)) + +class ReprLocals(TerminalRepr): + def __init__(self, lines): + self.lines = lines + + def toterminal(self, tw): + for line in self.lines: + tw.line(line) + +class ReprFuncArgs(TerminalRepr): + def __init__(self, args): + self.args = args + + def toterminal(self, tw): + if self.args: + linesofar = "" + for name, value in self.args: + ns = "%s = %s" %(name, value) + if 
len(ns) + len(linesofar) + 2 > tw.fullwidth: + if linesofar: + tw.line(linesofar) + linesofar = ns + else: + if linesofar: + linesofar += ", " + ns + else: + linesofar = ns + if linesofar: + tw.line(linesofar) + tw.line("") + + + +oldbuiltins = {} + +def patch_builtins(assertion=True, compile=True): + """ put compile and AssertionError builtins to Python's builtins. """ + if assertion: + from py._code import assertion + l = oldbuiltins.setdefault('AssertionError', []) + l.append(py.builtin.builtins.AssertionError) + py.builtin.builtins.AssertionError = assertion.AssertionError + if compile: + l = oldbuiltins.setdefault('compile', []) + l.append(py.builtin.builtins.compile) + py.builtin.builtins.compile = py.code.compile + +def unpatch_builtins(assertion=True, compile=True): + """ remove compile and AssertionError builtins from Python builtins. """ + if assertion: + py.builtin.builtins.AssertionError = oldbuiltins['AssertionError'].pop() + if compile: + py.builtin.builtins.compile = oldbuiltins['compile'].pop() + +def getrawcode(obj, trycall=True): + """ return code object for given function. 
""" + try: + return obj.__code__ + except AttributeError: + obj = getattr(obj, 'im_func', obj) + obj = getattr(obj, 'func_code', obj) + obj = getattr(obj, 'f_code', obj) + obj = getattr(obj, '__code__', obj) + if trycall and not hasattr(obj, 'co_firstlineno'): + if hasattr(obj, '__call__') and not py.std.inspect.isclass(obj): + x = getrawcode(obj.__call__, trycall=False) + if hasattr(x, 'co_firstlineno'): + return x + return obj + diff --git a/third_party/python/py/py/_code/source.py b/third_party/python/py/py/_code/source.py index 3a648e63579c5..59281b5d22843 100644 --- a/third_party/python/py/py/_code/source.py +++ b/third_party/python/py/py/_code/source.py @@ -1,419 +1,411 @@ -from __future__ import generators - -from bisect import bisect_right -import sys -import inspect, tokenize -import py -from types import ModuleType -cpy_compile = compile - -try: - import _ast - from _ast import PyCF_ONLY_AST as _AST_FLAG -except ImportError: - _AST_FLAG = 0 - _ast = None - - -class Source(object): - """ a immutable object holding a source code fragment, - possibly deindenting it. 
- """ - _compilecounter = 0 - def __init__(self, *parts, **kwargs): - self.lines = lines = [] - de = kwargs.get('deindent', True) - rstrip = kwargs.get('rstrip', True) - for part in parts: - if not part: - partlines = [] - if isinstance(part, Source): - partlines = part.lines - elif isinstance(part, (tuple, list)): - partlines = [x.rstrip("\n") for x in part] - elif isinstance(part, py.builtin._basestring): - partlines = part.split('\n') - if rstrip: - while partlines: - if partlines[-1].strip(): - break - partlines.pop() - else: - partlines = getsource(part, deindent=de).lines - if de: - partlines = deindent(partlines) - lines.extend(partlines) - - def __eq__(self, other): - try: - return self.lines == other.lines - except AttributeError: - if isinstance(other, str): - return str(self) == other - return False - - def __getitem__(self, key): - if isinstance(key, int): - return self.lines[key] - else: - if key.step not in (None, 1): - raise IndexError("cannot slice a Source with a step") - return self.__getslice__(key.start, key.stop) - - def __len__(self): - return len(self.lines) - - def __getslice__(self, start, end): - newsource = Source() - newsource.lines = self.lines[start:end] - return newsource - - def strip(self): - """ return new source object with trailing - and leading blank lines removed. - """ - start, end = 0, len(self) - while start < end and not self.lines[start].strip(): - start += 1 - while end > start and not self.lines[end-1].strip(): - end -= 1 - source = Source() - source.lines[:] = self.lines[start:end] - return source - - def putaround(self, before='', after='', indent=' ' * 4): - """ return a copy of the source object with - 'before' and 'after' wrapped around it. 
- """ - before = Source(before) - after = Source(after) - newsource = Source() - lines = [ (indent + line) for line in self.lines] - newsource.lines = before.lines + lines + after.lines - return newsource - - def indent(self, indent=' ' * 4): - """ return a copy of the source object with - all lines indented by the given indent-string. - """ - newsource = Source() - newsource.lines = [(indent+line) for line in self.lines] - return newsource - - def getstatement(self, lineno, assertion=False): - """ return Source statement which contains the - given linenumber (counted from 0). - """ - start, end = self.getstatementrange(lineno, assertion) - return self[start:end] - - def getstatementrange(self, lineno, assertion=False): - """ return (start, end) tuple which spans the minimal - statement region which containing the given lineno. - """ - if not (0 <= lineno < len(self)): - raise IndexError("lineno out of range") - ast, start, end = getstatementrange_ast(lineno, self) - return start, end - - def deindent(self, offset=None): - """ return a new source object deindented by offset. - If offset is None then guess an indentation offset from - the first non-blank line. Subsequent lines which have a - lower indentation offset will be copied verbatim as - they are assumed to be part of multilines. - """ - # XXX maybe use the tokenizer to properly handle multiline - # strings etc.pp? - newsource = Source() - newsource.lines[:] = deindent(self.lines, offset) - return newsource - - def isparseable(self, deindent=True): - """ return True if source is parseable, heuristically - deindenting it by default. 
- """ - try: - import parser - except ImportError: - syntax_checker = lambda x: compile(x, 'asd', 'exec') - else: - syntax_checker = parser.suite - - if deindent: - source = str(self.deindent()) - else: - source = str(self) - try: - #compile(source+'\n', "x", "exec") - syntax_checker(source+'\n') - except KeyboardInterrupt: - raise - except Exception: - return False - else: - return True - - def __str__(self): - return "\n".join(self.lines) - - def compile(self, filename=None, mode='exec', - flag=generators.compiler_flag, - dont_inherit=0, _genframe=None): - """ return compiled code object. if filename is None - invent an artificial filename which displays - the source/line position of the caller frame. - """ - if not filename or py.path.local(filename).check(file=0): - if _genframe is None: - _genframe = sys._getframe(1) # the caller - fn,lineno = _genframe.f_code.co_filename, _genframe.f_lineno - base = "<%d-codegen " % self._compilecounter - self.__class__._compilecounter += 1 - if not filename: - filename = base + '%s:%d>' % (fn, lineno) - else: - filename = base + '%r %s:%d>' % (filename, fn, lineno) - source = "\n".join(self.lines) + '\n' - try: - co = cpy_compile(source, filename, mode, flag) - except SyntaxError: - ex = sys.exc_info()[1] - # re-represent syntax errors from parsing python strings - msglines = self.lines[:ex.lineno] - if ex.offset: - msglines.append(" "*ex.offset + '^') - msglines.append("(code was compiled probably from here: %s)" % filename) - newex = SyntaxError('\n'.join(msglines)) - newex.offset = ex.offset - newex.lineno = ex.lineno - newex.text = ex.text - raise newex - else: - if flag & _AST_FLAG: - return co - lines = [(x + "\n") for x in self.lines] - if sys.version_info[0] >= 3: - # XXX py3's inspect.getsourcefile() checks for a module - # and a pep302 __loader__ ... 
we don't have a module - # at code compile-time so we need to fake it here - m = ModuleType("_pycodecompile_pseudo_module") - py.std.inspect.modulesbyfile[filename] = None - py.std.sys.modules[None] = m - m.__loader__ = 1 - py.std.linecache.cache[filename] = (1, None, lines, filename) - return co - -# -# public API shortcut functions -# - -def compile_(source, filename=None, mode='exec', flags= - generators.compiler_flag, dont_inherit=0): - """ compile the given source to a raw code object, - and maintain an internal cache which allows later - retrieval of the source code for the code object - and any recursively created code objects. - """ - if _ast is not None and isinstance(source, _ast.AST): - # XXX should Source support having AST? - return cpy_compile(source, filename, mode, flags, dont_inherit) - _genframe = sys._getframe(1) # the caller - s = Source(source) - co = s.compile(filename, mode, flags, _genframe=_genframe) - return co - - -def getfslineno(obj): - """ Return source location (path, lineno) for the given object. 
- If the source cannot be determined return ("", -1) - """ - try: - code = py.code.Code(obj) - except TypeError: - try: - fn = (py.std.inspect.getsourcefile(obj) or - py.std.inspect.getfile(obj)) - except TypeError: - return "", -1 - - fspath = fn and py.path.local(fn) or None - lineno = -1 - if fspath: - try: - _, lineno = findsource(obj) - except IOError: - pass - else: - fspath = code.path - lineno = code.firstlineno - assert isinstance(lineno, int) - return fspath, lineno - -# -# helper functions -# - -def findsource(obj): - try: - sourcelines, lineno = py.std.inspect.findsource(obj) - except py.builtin._sysex: - raise - except: - return None, -1 - source = Source() - source.lines = [line.rstrip() for line in sourcelines] - return source, lineno - -def getsource(obj, **kwargs): - obj = py.code.getrawcode(obj) - try: - strsrc = inspect.getsource(obj) - except IndentationError: - strsrc = "\"Buggy python version consider upgrading, cannot get source\"" - assert isinstance(strsrc, str) - return Source(strsrc, **kwargs) - -def deindent(lines, offset=None): - if offset is None: - for line in lines: - line = line.expandtabs() - s = line.lstrip() - if s: - offset = len(line)-len(s) - break - else: - offset = 0 - if offset == 0: - return list(lines) - newlines = [] - def readline_generator(lines): - for line in lines: - yield line + '\n' - while True: - yield '' - - it = readline_generator(lines) - - try: - for _, _, (sline, _), (eline, _), _ in tokenize.generate_tokens(lambda: next(it)): - if sline > len(lines): - break # End of input reached - if sline > len(newlines): - line = lines[sline - 1].expandtabs() - if line.lstrip() and line[:offset].isspace(): - line = line[offset:] # Deindent - newlines.append(line) - - for i in range(sline, eline): - # Don't deindent continuing lines of - # multiline tokens (i.e. multiline strings) - newlines.append(lines[i]) - except (IndentationError, tokenize.TokenError): - pass - # Add any lines we didn't see. E.g. 
if an exception was raised. - newlines.extend(lines[len(newlines):]) - return newlines - - -def get_statement_startend2(lineno, node): - import ast - # flatten all statements and except handlers into one lineno-list - # AST's line numbers start indexing at 1 - l = [] - for x in ast.walk(node): - if isinstance(x, _ast.stmt) or isinstance(x, _ast.ExceptHandler): - l.append(x.lineno - 1) - for name in "finalbody", "orelse": - val = getattr(x, name, None) - if val: - # treat the finally/orelse part as its own statement - l.append(val[0].lineno - 1 - 1) - l.sort() - insert_index = bisect_right(l, lineno) - start = l[insert_index - 1] - if insert_index >= len(l): - end = None - else: - end = l[insert_index] - return start, end - - -def getstatementrange_ast(lineno, source, assertion=False, astnode=None): - if astnode is None: - content = str(source) - if sys.version_info < (2,7): - content += "\n" - try: - astnode = compile(content, "source", "exec", 1024) # 1024 for AST - except ValueError: - start, end = getstatementrange_old(lineno, source, assertion) - return None, start, end - start, end = get_statement_startend2(lineno, astnode) - # we need to correct the end: - # - ast-parsing strips comments - # - there might be empty lines - # - we might have lesser indented code blocks at the end - if end is None: - end = len(source.lines) - - if end > start + 1: - # make sure we don't span differently indented code blocks - # by using the BlockFinder helper used which inspect.getsource() uses itself - block_finder = inspect.BlockFinder() - # if we start with an indented line, put blockfinder to "started" mode - block_finder.started = source.lines[start][0].isspace() - it = ((x + "\n") for x in source.lines[start:end]) - try: - for tok in tokenize.generate_tokens(lambda: next(it)): - block_finder.tokeneater(*tok) - except (inspect.EndOfBlock, IndentationError): - end = block_finder.last + start - except Exception: - pass - - # the end might still point to a comment or empty 
line, correct it - while end: - line = source.lines[end - 1].lstrip() - if line.startswith("#") or not line: - end -= 1 - else: - break - return astnode, start, end - - -def getstatementrange_old(lineno, source, assertion=False): - """ return (start, end) tuple which spans the minimal - statement region which containing the given lineno. - raise an IndexError if no such statementrange can be found. - """ - # XXX this logic is only used on python2.4 and below - # 1. find the start of the statement - from codeop import compile_command - for start in range(lineno, -1, -1): - if assertion: - line = source.lines[start] - # the following lines are not fully tested, change with care - if 'super' in line and 'self' in line and '__init__' in line: - raise IndexError("likely a subclass") - if "assert" not in line and "raise" not in line: - continue - trylines = source.lines[start:lineno+1] - # quick hack to prepare parsing an indented line with - # compile_command() (which errors on "return" outside defs) - trylines.insert(0, 'def xxx():') - trysource = '\n '.join(trylines) - # ^ space here - try: - compile_command(trysource) - except (SyntaxError, OverflowError, ValueError): - continue - - # 2. find the end of the statement - for end in range(lineno+1, len(source)+1): - trysource = source[start:end] - if trysource.isparseable(): - return start, end - raise SyntaxError("no valid source range around line %d " % (lineno,)) - - +from __future__ import generators + +from bisect import bisect_right +import sys +import inspect, tokenize +import py +from types import ModuleType +cpy_compile = compile + +try: + import _ast + from _ast import PyCF_ONLY_AST as _AST_FLAG +except ImportError: + _AST_FLAG = 0 + _ast = None + + +class Source(object): + """ a immutable object holding a source code fragment, + possibly deindenting it. 
+ """ + _compilecounter = 0 + def __init__(self, *parts, **kwargs): + self.lines = lines = [] + de = kwargs.get('deindent', True) + rstrip = kwargs.get('rstrip', True) + for part in parts: + if not part: + partlines = [] + if isinstance(part, Source): + partlines = part.lines + elif isinstance(part, (tuple, list)): + partlines = [x.rstrip("\n") for x in part] + elif isinstance(part, py.builtin._basestring): + partlines = part.split('\n') + if rstrip: + while partlines: + if partlines[-1].strip(): + break + partlines.pop() + else: + partlines = getsource(part, deindent=de).lines + if de: + partlines = deindent(partlines) + lines.extend(partlines) + + def __eq__(self, other): + try: + return self.lines == other.lines + except AttributeError: + if isinstance(other, str): + return str(self) == other + return False + + def __getitem__(self, key): + if isinstance(key, int): + return self.lines[key] + else: + if key.step not in (None, 1): + raise IndexError("cannot slice a Source with a step") + return self.__getslice__(key.start, key.stop) + + def __len__(self): + return len(self.lines) + + def __getslice__(self, start, end): + newsource = Source() + newsource.lines = self.lines[start:end] + return newsource + + def strip(self): + """ return new source object with trailing + and leading blank lines removed. + """ + start, end = 0, len(self) + while start < end and not self.lines[start].strip(): + start += 1 + while end > start and not self.lines[end-1].strip(): + end -= 1 + source = Source() + source.lines[:] = self.lines[start:end] + return source + + def putaround(self, before='', after='', indent=' ' * 4): + """ return a copy of the source object with + 'before' and 'after' wrapped around it. 
+ """ + before = Source(before) + after = Source(after) + newsource = Source() + lines = [ (indent + line) for line in self.lines] + newsource.lines = before.lines + lines + after.lines + return newsource + + def indent(self, indent=' ' * 4): + """ return a copy of the source object with + all lines indented by the given indent-string. + """ + newsource = Source() + newsource.lines = [(indent+line) for line in self.lines] + return newsource + + def getstatement(self, lineno, assertion=False): + """ return Source statement which contains the + given linenumber (counted from 0). + """ + start, end = self.getstatementrange(lineno, assertion) + return self[start:end] + + def getstatementrange(self, lineno, assertion=False): + """ return (start, end) tuple which spans the minimal + statement region which containing the given lineno. + """ + if not (0 <= lineno < len(self)): + raise IndexError("lineno out of range") + ast, start, end = getstatementrange_ast(lineno, self) + return start, end + + def deindent(self, offset=None): + """ return a new source object deindented by offset. + If offset is None then guess an indentation offset from + the first non-blank line. Subsequent lines which have a + lower indentation offset will be copied verbatim as + they are assumed to be part of multilines. + """ + # XXX maybe use the tokenizer to properly handle multiline + # strings etc.pp? + newsource = Source() + newsource.lines[:] = deindent(self.lines, offset) + return newsource + + def isparseable(self, deindent=True): + """ return True if source is parseable, heuristically + deindenting it by default. 
+ """ + try: + import parser + except ImportError: + syntax_checker = lambda x: compile(x, 'asd', 'exec') + else: + syntax_checker = parser.suite + + if deindent: + source = str(self.deindent()) + else: + source = str(self) + try: + #compile(source+'\n', "x", "exec") + syntax_checker(source+'\n') + except KeyboardInterrupt: + raise + except Exception: + return False + else: + return True + + def __str__(self): + return "\n".join(self.lines) + + def compile(self, filename=None, mode='exec', + flag=generators.compiler_flag, + dont_inherit=0, _genframe=None): + """ return compiled code object. if filename is None + invent an artificial filename which displays + the source/line position of the caller frame. + """ + if not filename or py.path.local(filename).check(file=0): + if _genframe is None: + _genframe = sys._getframe(1) # the caller + fn,lineno = _genframe.f_code.co_filename, _genframe.f_lineno + base = "<%d-codegen " % self._compilecounter + self.__class__._compilecounter += 1 + if not filename: + filename = base + '%s:%d>' % (fn, lineno) + else: + filename = base + '%r %s:%d>' % (filename, fn, lineno) + source = "\n".join(self.lines) + '\n' + try: + co = cpy_compile(source, filename, mode, flag) + except SyntaxError: + ex = sys.exc_info()[1] + # re-represent syntax errors from parsing python strings + msglines = self.lines[:ex.lineno] + if ex.offset: + msglines.append(" "*ex.offset + '^') + msglines.append("(code was compiled probably from here: %s)" % filename) + newex = SyntaxError('\n'.join(msglines)) + newex.offset = ex.offset + newex.lineno = ex.lineno + newex.text = ex.text + raise newex + else: + if flag & _AST_FLAG: + return co + lines = [(x + "\n") for x in self.lines] + py.std.linecache.cache[filename] = (1, None, lines, filename) + return co + +# +# public API shortcut functions +# + +def compile_(source, filename=None, mode='exec', flags= + generators.compiler_flag, dont_inherit=0): + """ compile the given source to a raw code object, + and maintain 
an internal cache which allows later + retrieval of the source code for the code object + and any recursively created code objects. + """ + if _ast is not None and isinstance(source, _ast.AST): + # XXX should Source support having AST? + return cpy_compile(source, filename, mode, flags, dont_inherit) + _genframe = sys._getframe(1) # the caller + s = Source(source) + co = s.compile(filename, mode, flags, _genframe=_genframe) + return co + + +def getfslineno(obj): + """ Return source location (path, lineno) for the given object. + If the source cannot be determined return ("", -1) + """ + try: + code = py.code.Code(obj) + except TypeError: + try: + fn = (py.std.inspect.getsourcefile(obj) or + py.std.inspect.getfile(obj)) + except TypeError: + return "", -1 + + fspath = fn and py.path.local(fn) or None + lineno = -1 + if fspath: + try: + _, lineno = findsource(obj) + except IOError: + pass + else: + fspath = code.path + lineno = code.firstlineno + assert isinstance(lineno, int) + return fspath, lineno + +# +# helper functions +# + +def findsource(obj): + try: + sourcelines, lineno = py.std.inspect.findsource(obj) + except py.builtin._sysex: + raise + except: + return None, -1 + source = Source() + source.lines = [line.rstrip() for line in sourcelines] + return source, lineno + +def getsource(obj, **kwargs): + obj = py.code.getrawcode(obj) + try: + strsrc = inspect.getsource(obj) + except IndentationError: + strsrc = "\"Buggy python version consider upgrading, cannot get source\"" + assert isinstance(strsrc, str) + return Source(strsrc, **kwargs) + +def deindent(lines, offset=None): + if offset is None: + for line in lines: + line = line.expandtabs() + s = line.lstrip() + if s: + offset = len(line)-len(s) + break + else: + offset = 0 + if offset == 0: + return list(lines) + newlines = [] + def readline_generator(lines): + for line in lines: + yield line + '\n' + while True: + yield '' + + it = readline_generator(lines) + + try: + for _, _, (sline, _), (eline, _), _ in 
tokenize.generate_tokens(lambda: next(it)): + if sline > len(lines): + break # End of input reached + if sline > len(newlines): + line = lines[sline - 1].expandtabs() + if line.lstrip() and line[:offset].isspace(): + line = line[offset:] # Deindent + newlines.append(line) + + for i in range(sline, eline): + # Don't deindent continuing lines of + # multiline tokens (i.e. multiline strings) + newlines.append(lines[i]) + except (IndentationError, tokenize.TokenError): + pass + # Add any lines we didn't see. E.g. if an exception was raised. + newlines.extend(lines[len(newlines):]) + return newlines + + +def get_statement_startend2(lineno, node): + import ast + # flatten all statements and except handlers into one lineno-list + # AST's line numbers start indexing at 1 + l = [] + for x in ast.walk(node): + if isinstance(x, _ast.stmt) or isinstance(x, _ast.ExceptHandler): + l.append(x.lineno - 1) + for name in "finalbody", "orelse": + val = getattr(x, name, None) + if val: + # treat the finally/orelse part as its own statement + l.append(val[0].lineno - 1 - 1) + l.sort() + insert_index = bisect_right(l, lineno) + start = l[insert_index - 1] + if insert_index >= len(l): + end = None + else: + end = l[insert_index] + return start, end + + +def getstatementrange_ast(lineno, source, assertion=False, astnode=None): + if astnode is None: + content = str(source) + if sys.version_info < (2,7): + content += "\n" + try: + astnode = compile(content, "source", "exec", 1024) # 1024 for AST + except ValueError: + start, end = getstatementrange_old(lineno, source, assertion) + return None, start, end + start, end = get_statement_startend2(lineno, astnode) + # we need to correct the end: + # - ast-parsing strips comments + # - there might be empty lines + # - we might have lesser indented code blocks at the end + if end is None: + end = len(source.lines) + + if end > start + 1: + # make sure we don't span differently indented code blocks + # by using the BlockFinder helper used which 
inspect.getsource() uses itself + block_finder = inspect.BlockFinder() + # if we start with an indented line, put blockfinder to "started" mode + block_finder.started = source.lines[start][0].isspace() + it = ((x + "\n") for x in source.lines[start:end]) + try: + for tok in tokenize.generate_tokens(lambda: next(it)): + block_finder.tokeneater(*tok) + except (inspect.EndOfBlock, IndentationError): + end = block_finder.last + start + except Exception: + pass + + # the end might still point to a comment or empty line, correct it + while end: + line = source.lines[end - 1].lstrip() + if line.startswith("#") or not line: + end -= 1 + else: + break + return astnode, start, end + + +def getstatementrange_old(lineno, source, assertion=False): + """ return (start, end) tuple which spans the minimal + statement region which containing the given lineno. + raise an IndexError if no such statementrange can be found. + """ + # XXX this logic is only used on python2.4 and below + # 1. find the start of the statement + from codeop import compile_command + for start in range(lineno, -1, -1): + if assertion: + line = source.lines[start] + # the following lines are not fully tested, change with care + if 'super' in line and 'self' in line and '__init__' in line: + raise IndexError("likely a subclass") + if "assert" not in line and "raise" not in line: + continue + trylines = source.lines[start:lineno+1] + # quick hack to prepare parsing an indented line with + # compile_command() (which errors on "return" outside defs) + trylines.insert(0, 'def xxx():') + trysource = '\n '.join(trylines) + # ^ space here + try: + compile_command(trysource) + except (SyntaxError, OverflowError, ValueError): + continue + + # 2. 
find the end of the statement + for end in range(lineno+1, len(source)+1): + trysource = source[start:end] + if trysource.isparseable(): + return start, end + raise SyntaxError("no valid source range around line %d " % (lineno,)) + + diff --git a/third_party/python/py/py/_error.py b/third_party/python/py/py/_error.py index 550fb521a047d..228de58b93c3d 100644 --- a/third_party/python/py/py/_error.py +++ b/third_party/python/py/py/_error.py @@ -1,88 +1,89 @@ -""" -create errno-specific classes for IO or os calls. - -""" -import sys, os, errno - -class Error(EnvironmentError): - def __repr__(self): - return "%s.%s %r: %s " %(self.__class__.__module__, - self.__class__.__name__, - self.__class__.__doc__, - " ".join(map(str, self.args)), - #repr(self.args) - ) - - def __str__(self): - s = "[%s]: %s" %(self.__class__.__doc__, - " ".join(map(str, self.args)), - ) - return s - -_winerrnomap = { - 2: errno.ENOENT, - 3: errno.ENOENT, - 17: errno.EEXIST, - 13: errno.EBUSY, # empty cd drive, but ENOMEDIUM seems unavailiable - 22: errno.ENOTDIR, - 20: errno.ENOTDIR, - 267: errno.ENOTDIR, - 5: errno.EACCES, # anything better? -} - -class ErrorMaker(object): - """ lazily provides Exception classes for each possible POSIX errno - (as defined per the 'errno' module). All such instances - subclass EnvironmentError. - """ - Error = Error - _errno2class = {} - - def __getattr__(self, name): - if name[0] == "_": - raise AttributeError(name) - eno = getattr(errno, name) - cls = self._geterrnoclass(eno) - setattr(self, name, cls) - return cls - - def _geterrnoclass(self, eno): - try: - return self._errno2class[eno] - except KeyError: - clsname = errno.errorcode.get(eno, "UnknownErrno%d" %(eno,)) - errorcls = type(Error)(clsname, (Error,), - {'__module__':'py.error', - '__doc__': os.strerror(eno)}) - self._errno2class[eno] = errorcls - return errorcls - - def checked_call(self, func, *args, **kwargs): - """ call a function and raise an errno-exception if applicable. 
""" - __tracebackhide__ = True - try: - return func(*args, **kwargs) - except self.Error: - raise - except (OSError, EnvironmentError): - cls, value, tb = sys.exc_info() - if not hasattr(value, 'errno'): - raise - __tracebackhide__ = False - errno = value.errno - try: - if not isinstance(value, WindowsError): - raise NameError - except NameError: - # we are not on Windows, or we got a proper OSError - cls = self._geterrnoclass(errno) - else: - try: - cls = self._geterrnoclass(_winerrnomap[errno]) - except KeyError: - raise value - raise cls("%s%r" % (func.__name__, args)) - __tracebackhide__ = True - - -error = ErrorMaker() +""" +create errno-specific classes for IO or os calls. + +""" +import sys, os, errno + +class Error(EnvironmentError): + def __repr__(self): + return "%s.%s %r: %s " %(self.__class__.__module__, + self.__class__.__name__, + self.__class__.__doc__, + " ".join(map(str, self.args)), + #repr(self.args) + ) + + def __str__(self): + s = "[%s]: %s" %(self.__class__.__doc__, + " ".join(map(str, self.args)), + ) + return s + +_winerrnomap = { + 2: errno.ENOENT, + 3: errno.ENOENT, + 17: errno.EEXIST, + 18: errno.EXDEV, + 13: errno.EBUSY, # empty cd drive, but ENOMEDIUM seems unavailiable + 22: errno.ENOTDIR, + 20: errno.ENOTDIR, + 267: errno.ENOTDIR, + 5: errno.EACCES, # anything better? +} + +class ErrorMaker(object): + """ lazily provides Exception classes for each possible POSIX errno + (as defined per the 'errno' module). All such instances + subclass EnvironmentError. 
+ """ + Error = Error + _errno2class = {} + + def __getattr__(self, name): + if name[0] == "_": + raise AttributeError(name) + eno = getattr(errno, name) + cls = self._geterrnoclass(eno) + setattr(self, name, cls) + return cls + + def _geterrnoclass(self, eno): + try: + return self._errno2class[eno] + except KeyError: + clsname = errno.errorcode.get(eno, "UnknownErrno%d" %(eno,)) + errorcls = type(Error)(clsname, (Error,), + {'__module__':'py.error', + '__doc__': os.strerror(eno)}) + self._errno2class[eno] = errorcls + return errorcls + + def checked_call(self, func, *args, **kwargs): + """ call a function and raise an errno-exception if applicable. """ + __tracebackhide__ = True + try: + return func(*args, **kwargs) + except self.Error: + raise + except (OSError, EnvironmentError): + cls, value, tb = sys.exc_info() + if not hasattr(value, 'errno'): + raise + __tracebackhide__ = False + errno = value.errno + try: + if not isinstance(value, WindowsError): + raise NameError + except NameError: + # we are not on Windows, or we got a proper OSError + cls = self._geterrnoclass(errno) + else: + try: + cls = self._geterrnoclass(_winerrnomap[errno]) + except KeyError: + raise value + raise cls("%s%r" % (func.__name__, args)) + __tracebackhide__ = True + + +error = ErrorMaker() diff --git a/third_party/python/py/py/_iniconfig.py b/third_party/python/py/py/_iniconfig.py index 92b50bd853a33..8e46404beafa4 100644 --- a/third_party/python/py/py/_iniconfig.py +++ b/third_party/python/py/py/_iniconfig.py @@ -1,162 +1,162 @@ -""" brain-dead simple parser for ini-style files. 
-(C) Ronny Pfannschmidt, Holger Krekel -- MIT licensed -""" -__version__ = "0.2.dev2" - -__all__ = ['IniConfig', 'ParseError'] - -COMMENTCHARS = "#;" - -class ParseError(Exception): - def __init__(self, path, lineno, msg): - Exception.__init__(self, path, lineno, msg) - self.path = path - self.lineno = lineno - self.msg = msg - - def __str__(self): - return "%s:%s: %s" %(self.path, self.lineno+1, self.msg) - -class SectionWrapper(object): - def __init__(self, config, name): - self.config = config - self.name = name - - def lineof(self, name): - return self.config.lineof(self.name, name) - - def get(self, key, default=None, convert=str): - return self.config.get(self.name, key, convert=convert, default=default) - - def __getitem__(self, key): - return self.config.sections[self.name][key] - - def __iter__(self): - section = self.config.sections.get(self.name, []) - def lineof(key): - return self.config.lineof(self.name, key) - for name in sorted(section, key=lineof): - yield name - - def items(self): - for name in self: - yield name, self[name] - - -class IniConfig(object): - def __init__(self, path, data=None): - self.path = str(path) # convenience - if data is None: - f = open(self.path) - try: - tokens = self._parse(iter(f)) - finally: - f.close() - else: - tokens = self._parse(data.splitlines(True)) - - self._sources = {} - self.sections = {} - - for lineno, section, name, value in tokens: - if section is None: - self._raise(lineno, 'no section header defined') - self._sources[section, name] = lineno - if name is None: - if section in self.sections: - self._raise(lineno, 'duplicate section %r'%(section, )) - self.sections[section] = {} - else: - if name in self.sections[section]: - self._raise(lineno, 'duplicate name %r'%(name, )) - self.sections[section][name] = value - - def _raise(self, lineno, msg): - raise ParseError(self.path, lineno, msg) - - def _parse(self, line_iter): - result = [] - section = None - for lineno, line in enumerate(line_iter): - name, 
data = self._parseline(line, lineno) - # new value - if name is not None and data is not None: - result.append((lineno, section, name, data)) - # new section - elif name is not None and data is None: - if not name: - self._raise(lineno, 'empty section name') - section = name - result.append((lineno, section, None, None)) - # continuation - elif name is None and data is not None: - if not result: - self._raise(lineno, 'unexpected value continuation') - last = result.pop() - last_name, last_data = last[-2:] - if last_name is None: - self._raise(lineno, 'unexpected value continuation') - - if last_data: - data = '%s\n%s' % (last_data, data) - result.append(last[:-1] + (data,)) - return result - - def _parseline(self, line, lineno): - # blank lines - if iscommentline(line): - line = "" - else: - line = line.rstrip() - if not line: - return None, None - # section - if line[0] == '[': - realline = line - for c in COMMENTCHARS: - line = line.split(c)[0].rstrip() - if line[-1] == "]": - return line[1:-1], None - return None, realline.strip() - # value - elif not line[0].isspace(): - try: - name, value = line.split('=', 1) - if ":" in name: - raise ValueError() - except ValueError: - try: - name, value = line.split(":", 1) - except ValueError: - self._raise(lineno, 'unexpected line: %r' % line) - return name.strip(), value.strip() - # continuation - else: - return None, line.strip() - - def lineof(self, section, name=None): - lineno = self._sources.get((section, name)) - if lineno is not None: - return lineno + 1 - - def get(self, section, name, default=None, convert=str): - try: - return convert(self.sections[section][name]) - except KeyError: - return default - - def __getitem__(self, name): - if name not in self.sections: - raise KeyError(name) - return SectionWrapper(self, name) - - def __iter__(self): - for name in sorted(self.sections, key=self.lineof): - yield SectionWrapper(self, name) - - def __contains__(self, arg): - return arg in self.sections - -def 
iscommentline(line): - c = line.lstrip()[:1] - return c in COMMENTCHARS +""" brain-dead simple parser for ini-style files. +(C) Ronny Pfannschmidt, Holger Krekel -- MIT licensed +""" +__version__ = "0.2.dev2" + +__all__ = ['IniConfig', 'ParseError'] + +COMMENTCHARS = "#;" + +class ParseError(Exception): + def __init__(self, path, lineno, msg): + Exception.__init__(self, path, lineno, msg) + self.path = path + self.lineno = lineno + self.msg = msg + + def __str__(self): + return "%s:%s: %s" %(self.path, self.lineno+1, self.msg) + +class SectionWrapper(object): + def __init__(self, config, name): + self.config = config + self.name = name + + def lineof(self, name): + return self.config.lineof(self.name, name) + + def get(self, key, default=None, convert=str): + return self.config.get(self.name, key, convert=convert, default=default) + + def __getitem__(self, key): + return self.config.sections[self.name][key] + + def __iter__(self): + section = self.config.sections.get(self.name, []) + def lineof(key): + return self.config.lineof(self.name, key) + for name in sorted(section, key=lineof): + yield name + + def items(self): + for name in self: + yield name, self[name] + + +class IniConfig(object): + def __init__(self, path, data=None): + self.path = str(path) # convenience + if data is None: + f = open(self.path) + try: + tokens = self._parse(iter(f)) + finally: + f.close() + else: + tokens = self._parse(data.splitlines(True)) + + self._sources = {} + self.sections = {} + + for lineno, section, name, value in tokens: + if section is None: + self._raise(lineno, 'no section header defined') + self._sources[section, name] = lineno + if name is None: + if section in self.sections: + self._raise(lineno, 'duplicate section %r'%(section, )) + self.sections[section] = {} + else: + if name in self.sections[section]: + self._raise(lineno, 'duplicate name %r'%(name, )) + self.sections[section][name] = value + + def _raise(self, lineno, msg): + raise ParseError(self.path, lineno, 
msg) + + def _parse(self, line_iter): + result = [] + section = None + for lineno, line in enumerate(line_iter): + name, data = self._parseline(line, lineno) + # new value + if name is not None and data is not None: + result.append((lineno, section, name, data)) + # new section + elif name is not None and data is None: + if not name: + self._raise(lineno, 'empty section name') + section = name + result.append((lineno, section, None, None)) + # continuation + elif name is None and data is not None: + if not result: + self._raise(lineno, 'unexpected value continuation') + last = result.pop() + last_name, last_data = last[-2:] + if last_name is None: + self._raise(lineno, 'unexpected value continuation') + + if last_data: + data = '%s\n%s' % (last_data, data) + result.append(last[:-1] + (data,)) + return result + + def _parseline(self, line, lineno): + # blank lines + if iscommentline(line): + line = "" + else: + line = line.rstrip() + if not line: + return None, None + # section + if line[0] == '[': + realline = line + for c in COMMENTCHARS: + line = line.split(c)[0].rstrip() + if line[-1] == "]": + return line[1:-1], None + return None, realline.strip() + # value + elif not line[0].isspace(): + try: + name, value = line.split('=', 1) + if ":" in name: + raise ValueError() + except ValueError: + try: + name, value = line.split(":", 1) + except ValueError: + self._raise(lineno, 'unexpected line: %r' % line) + return name.strip(), value.strip() + # continuation + else: + return None, line.strip() + + def lineof(self, section, name=None): + lineno = self._sources.get((section, name)) + if lineno is not None: + return lineno + 1 + + def get(self, section, name, default=None, convert=str): + try: + return convert(self.sections[section][name]) + except KeyError: + return default + + def __getitem__(self, name): + if name not in self.sections: + raise KeyError(name) + return SectionWrapper(self, name) + + def __iter__(self): + for name in sorted(self.sections, 
key=self.lineof): + yield SectionWrapper(self, name) + + def __contains__(self, arg): + return arg in self.sections + +def iscommentline(line): + c = line.lstrip()[:1] + return c in COMMENTCHARS diff --git a/third_party/python/py/py/_io/__init__.py b/third_party/python/py/py/_io/__init__.py index 835f01f3ab9dc..f1a6d6318b228 100644 --- a/third_party/python/py/py/_io/__init__.py +++ b/third_party/python/py/py/_io/__init__.py @@ -1 +1 @@ -""" input/output helping """ +""" input/output helping """ diff --git a/third_party/python/py/py/_io/capture.py b/third_party/python/py/py/_io/capture.py index bc157ed978f72..3ce2259776c38 100644 --- a/third_party/python/py/py/_io/capture.py +++ b/third_party/python/py/py/_io/capture.py @@ -1,371 +1,371 @@ -import os -import sys -import py -import tempfile - -try: - from io import StringIO -except ImportError: - from StringIO import StringIO - -if sys.version_info < (3,0): - class TextIO(StringIO): - def write(self, data): - if not isinstance(data, unicode): - data = unicode(data, getattr(self, '_encoding', 'UTF-8'), 'replace') - StringIO.write(self, data) -else: - TextIO = StringIO - -try: - from io import BytesIO -except ImportError: - class BytesIO(StringIO): - def write(self, data): - if isinstance(data, unicode): - raise TypeError("not a byte value: %r" %(data,)) - StringIO.write(self, data) - -patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'} - -class FDCapture: - """ Capture IO to/from a given os-level filedescriptor. """ - - def __init__(self, targetfd, tmpfile=None, now=True, patchsys=False): - """ save targetfd descriptor, and open a new - temporary file there. If no tmpfile is - specified a tempfile.Tempfile() will be opened - in text mode. 
- """ - self.targetfd = targetfd - if tmpfile is None and targetfd != 0: - f = tempfile.TemporaryFile('wb+') - tmpfile = dupfile(f, encoding="UTF-8") - f.close() - self.tmpfile = tmpfile - self._savefd = os.dup(self.targetfd) - if patchsys: - self._oldsys = getattr(sys, patchsysdict[targetfd]) - if now: - self.start() - - def start(self): - try: - os.fstat(self._savefd) - except OSError: - raise ValueError("saved filedescriptor not valid, " - "did you call start() twice?") - if self.targetfd == 0 and not self.tmpfile: - fd = os.open(devnullpath, os.O_RDONLY) - os.dup2(fd, 0) - os.close(fd) - if hasattr(self, '_oldsys'): - setattr(sys, patchsysdict[self.targetfd], DontReadFromInput()) - else: - os.dup2(self.tmpfile.fileno(), self.targetfd) - if hasattr(self, '_oldsys'): - setattr(sys, patchsysdict[self.targetfd], self.tmpfile) - - def done(self): - """ unpatch and clean up, returns the self.tmpfile (file object) - """ - os.dup2(self._savefd, self.targetfd) - os.close(self._savefd) - if self.targetfd != 0: - self.tmpfile.seek(0) - if hasattr(self, '_oldsys'): - setattr(sys, patchsysdict[self.targetfd], self._oldsys) - return self.tmpfile - - def writeorg(self, data): - """ write a string to the original file descriptor - """ - tempfp = tempfile.TemporaryFile() - try: - os.dup2(self._savefd, tempfp.fileno()) - tempfp.write(data) - finally: - tempfp.close() - - -def dupfile(f, mode=None, buffering=0, raising=False, encoding=None): - """ return a new open file object that's a duplicate of f - - mode is duplicated if not given, 'buffering' controls - buffer size (defaulting to no buffering) and 'raising' - defines whether an exception is raised when an incompatible - file object is passed in (if raising is False, the file - object itself will be returned) - """ - try: - fd = f.fileno() - mode = mode or f.mode - except AttributeError: - if raising: - raise - return f - newfd = os.dup(fd) - if sys.version_info >= (3,0): - if encoding is not None: - mode = mode.replace("b", 
"") - buffering = True - return os.fdopen(newfd, mode, buffering, encoding, closefd=True) - else: - f = os.fdopen(newfd, mode, buffering) - if encoding is not None: - return EncodedFile(f, encoding) - return f - -class EncodedFile(object): - def __init__(self, _stream, encoding): - self._stream = _stream - self.encoding = encoding - - def write(self, obj): - if isinstance(obj, unicode): - obj = obj.encode(self.encoding) - elif isinstance(obj, str): - pass - else: - obj = str(obj) - self._stream.write(obj) - - def writelines(self, linelist): - data = ''.join(linelist) - self.write(data) - - def __getattr__(self, name): - return getattr(self._stream, name) - -class Capture(object): - def call(cls, func, *args, **kwargs): - """ return a (res, out, err) tuple where - out and err represent the output/error output - during function execution. - call the given function with args/kwargs - and capture output/error during its execution. - """ - so = cls() - try: - res = func(*args, **kwargs) - finally: - out, err = so.reset() - return res, out, err - call = classmethod(call) - - def reset(self): - """ reset sys.stdout/stderr and return captured output as strings. """ - if hasattr(self, '_reset'): - raise ValueError("was already reset") - self._reset = True - outfile, errfile = self.done(save=False) - out, err = "", "" - if outfile and not outfile.closed: - out = outfile.read() - outfile.close() - if errfile and errfile != outfile and not errfile.closed: - err = errfile.read() - errfile.close() - return out, err - - def suspend(self): - """ return current snapshot captures, memorize tempfiles. """ - outerr = self.readouterr() - outfile, errfile = self.done() - return outerr - - -class StdCaptureFD(Capture): - """ This class allows to capture writes to FD1 and FD2 - and may connect a NULL file to FD0 (and prevent - reads from sys.stdin). If any of the 0,1,2 file descriptors - is invalid it will not be captured. 
- """ - def __init__(self, out=True, err=True, mixed=False, - in_=True, patchsys=True, now=True): - self._options = { - "out": out, - "err": err, - "mixed": mixed, - "in_": in_, - "patchsys": patchsys, - "now": now, - } - self._save() - if now: - self.startall() - - def _save(self): - in_ = self._options['in_'] - out = self._options['out'] - err = self._options['err'] - mixed = self._options['mixed'] - patchsys = self._options['patchsys'] - if in_: - try: - self.in_ = FDCapture(0, tmpfile=None, now=False, - patchsys=patchsys) - except OSError: - pass - if out: - tmpfile = None - if hasattr(out, 'write'): - tmpfile = out - try: - self.out = FDCapture(1, tmpfile=tmpfile, - now=False, patchsys=patchsys) - self._options['out'] = self.out.tmpfile - except OSError: - pass - if err: - if out and mixed: - tmpfile = self.out.tmpfile - elif hasattr(err, 'write'): - tmpfile = err - else: - tmpfile = None - try: - self.err = FDCapture(2, tmpfile=tmpfile, - now=False, patchsys=patchsys) - self._options['err'] = self.err.tmpfile - except OSError: - pass - - def startall(self): - if hasattr(self, 'in_'): - self.in_.start() - if hasattr(self, 'out'): - self.out.start() - if hasattr(self, 'err'): - self.err.start() - - def resume(self): - """ resume capturing with original temp files. """ - self.startall() - - def done(self, save=True): - """ return (outfile, errfile) and stop capturing. """ - outfile = errfile = None - if hasattr(self, 'out') and not self.out.tmpfile.closed: - outfile = self.out.done() - if hasattr(self, 'err') and not self.err.tmpfile.closed: - errfile = self.err.done() - if hasattr(self, 'in_'): - tmpfile = self.in_.done() - if save: - self._save() - return outfile, errfile - - def readouterr(self): - """ return snapshot value of stdout/stderr capturings. 
""" - if hasattr(self, "out"): - out = self._readsnapshot(self.out.tmpfile) - else: - out = "" - if hasattr(self, "err"): - err = self._readsnapshot(self.err.tmpfile) - else: - err = "" - return [out, err] - - def _readsnapshot(self, f): - f.seek(0) - res = f.read() - enc = getattr(f, "encoding", None) - if enc: - res = py.builtin._totext(res, enc, "replace") - f.truncate(0) - f.seek(0) - return res - - -class StdCapture(Capture): - """ This class allows to capture writes to sys.stdout|stderr "in-memory" - and will raise errors on tries to read from sys.stdin. It only - modifies sys.stdout|stderr|stdin attributes and does not - touch underlying File Descriptors (use StdCaptureFD for that). - """ - def __init__(self, out=True, err=True, in_=True, mixed=False, now=True): - self._oldout = sys.stdout - self._olderr = sys.stderr - self._oldin = sys.stdin - if out and not hasattr(out, 'file'): - out = TextIO() - self.out = out - if err: - if mixed: - err = out - elif not hasattr(err, 'write'): - err = TextIO() - self.err = err - self.in_ = in_ - if now: - self.startall() - - def startall(self): - if self.out: - sys.stdout = self.out - if self.err: - sys.stderr = self.err - if self.in_: - sys.stdin = self.in_ = DontReadFromInput() - - def done(self, save=True): - """ return (outfile, errfile) and stop capturing. """ - outfile = errfile = None - if self.out and not self.out.closed: - sys.stdout = self._oldout - outfile = self.out - outfile.seek(0) - if self.err and not self.err.closed: - sys.stderr = self._olderr - errfile = self.err - errfile.seek(0) - if self.in_: - sys.stdin = self._oldin - return outfile, errfile - - def resume(self): - """ resume capturing with original temp files. """ - self.startall() - - def readouterr(self): - """ return snapshot value of stdout/stderr capturings. 
""" - out = err = "" - if self.out: - out = self.out.getvalue() - self.out.truncate(0) - self.out.seek(0) - if self.err: - err = self.err.getvalue() - self.err.truncate(0) - self.err.seek(0) - return out, err - -class DontReadFromInput: - """Temporary stub class. Ideally when stdin is accessed, the - capturing should be turned off, with possibly all data captured - so far sent to the screen. This should be configurable, though, - because in automated test runs it is better to crash than - hang indefinitely. - """ - def read(self, *args): - raise IOError("reading from stdin while output is captured") - readline = read - readlines = read - __iter__ = read - - def fileno(self): - raise ValueError("redirected Stdin is pseudofile, has no fileno()") - def isatty(self): - return False - def close(self): - pass - -try: - devnullpath = os.devnull -except AttributeError: - if os.name == 'nt': - devnullpath = 'NUL' - else: - devnullpath = '/dev/null' +import os +import sys +import py +import tempfile + +try: + from io import StringIO +except ImportError: + from StringIO import StringIO + +if sys.version_info < (3,0): + class TextIO(StringIO): + def write(self, data): + if not isinstance(data, unicode): + data = unicode(data, getattr(self, '_encoding', 'UTF-8'), 'replace') + StringIO.write(self, data) +else: + TextIO = StringIO + +try: + from io import BytesIO +except ImportError: + class BytesIO(StringIO): + def write(self, data): + if isinstance(data, unicode): + raise TypeError("not a byte value: %r" %(data,)) + StringIO.write(self, data) + +patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'} + +class FDCapture: + """ Capture IO to/from a given os-level filedescriptor. """ + + def __init__(self, targetfd, tmpfile=None, now=True, patchsys=False): + """ save targetfd descriptor, and open a new + temporary file there. If no tmpfile is + specified a tempfile.Tempfile() will be opened + in text mode. 
+ """ + self.targetfd = targetfd + if tmpfile is None and targetfd != 0: + f = tempfile.TemporaryFile('wb+') + tmpfile = dupfile(f, encoding="UTF-8") + f.close() + self.tmpfile = tmpfile + self._savefd = os.dup(self.targetfd) + if patchsys: + self._oldsys = getattr(sys, patchsysdict[targetfd]) + if now: + self.start() + + def start(self): + try: + os.fstat(self._savefd) + except OSError: + raise ValueError("saved filedescriptor not valid, " + "did you call start() twice?") + if self.targetfd == 0 and not self.tmpfile: + fd = os.open(devnullpath, os.O_RDONLY) + os.dup2(fd, 0) + os.close(fd) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], DontReadFromInput()) + else: + os.dup2(self.tmpfile.fileno(), self.targetfd) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], self.tmpfile) + + def done(self): + """ unpatch and clean up, returns the self.tmpfile (file object) + """ + os.dup2(self._savefd, self.targetfd) + os.close(self._savefd) + if self.targetfd != 0: + self.tmpfile.seek(0) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], self._oldsys) + return self.tmpfile + + def writeorg(self, data): + """ write a string to the original file descriptor + """ + tempfp = tempfile.TemporaryFile() + try: + os.dup2(self._savefd, tempfp.fileno()) + tempfp.write(data) + finally: + tempfp.close() + + +def dupfile(f, mode=None, buffering=0, raising=False, encoding=None): + """ return a new open file object that's a duplicate of f + + mode is duplicated if not given, 'buffering' controls + buffer size (defaulting to no buffering) and 'raising' + defines whether an exception is raised when an incompatible + file object is passed in (if raising is False, the file + object itself will be returned) + """ + try: + fd = f.fileno() + mode = mode or f.mode + except AttributeError: + if raising: + raise + return f + newfd = os.dup(fd) + if sys.version_info >= (3,0): + if encoding is not None: + mode = mode.replace("b", 
"") + buffering = True + return os.fdopen(newfd, mode, buffering, encoding, closefd=True) + else: + f = os.fdopen(newfd, mode, buffering) + if encoding is not None: + return EncodedFile(f, encoding) + return f + +class EncodedFile(object): + def __init__(self, _stream, encoding): + self._stream = _stream + self.encoding = encoding + + def write(self, obj): + if isinstance(obj, unicode): + obj = obj.encode(self.encoding) + elif isinstance(obj, str): + pass + else: + obj = str(obj) + self._stream.write(obj) + + def writelines(self, linelist): + data = ''.join(linelist) + self.write(data) + + def __getattr__(self, name): + return getattr(self._stream, name) + +class Capture(object): + def call(cls, func, *args, **kwargs): + """ return a (res, out, err) tuple where + out and err represent the output/error output + during function execution. + call the given function with args/kwargs + and capture output/error during its execution. + """ + so = cls() + try: + res = func(*args, **kwargs) + finally: + out, err = so.reset() + return res, out, err + call = classmethod(call) + + def reset(self): + """ reset sys.stdout/stderr and return captured output as strings. """ + if hasattr(self, '_reset'): + raise ValueError("was already reset") + self._reset = True + outfile, errfile = self.done(save=False) + out, err = "", "" + if outfile and not outfile.closed: + out = outfile.read() + outfile.close() + if errfile and errfile != outfile and not errfile.closed: + err = errfile.read() + errfile.close() + return out, err + + def suspend(self): + """ return current snapshot captures, memorize tempfiles. """ + outerr = self.readouterr() + outfile, errfile = self.done() + return outerr + + +class StdCaptureFD(Capture): + """ This class allows to capture writes to FD1 and FD2 + and may connect a NULL file to FD0 (and prevent + reads from sys.stdin). If any of the 0,1,2 file descriptors + is invalid it will not be captured. 
+ """ + def __init__(self, out=True, err=True, mixed=False, + in_=True, patchsys=True, now=True): + self._options = { + "out": out, + "err": err, + "mixed": mixed, + "in_": in_, + "patchsys": patchsys, + "now": now, + } + self._save() + if now: + self.startall() + + def _save(self): + in_ = self._options['in_'] + out = self._options['out'] + err = self._options['err'] + mixed = self._options['mixed'] + patchsys = self._options['patchsys'] + if in_: + try: + self.in_ = FDCapture(0, tmpfile=None, now=False, + patchsys=patchsys) + except OSError: + pass + if out: + tmpfile = None + if hasattr(out, 'write'): + tmpfile = out + try: + self.out = FDCapture(1, tmpfile=tmpfile, + now=False, patchsys=patchsys) + self._options['out'] = self.out.tmpfile + except OSError: + pass + if err: + if out and mixed: + tmpfile = self.out.tmpfile + elif hasattr(err, 'write'): + tmpfile = err + else: + tmpfile = None + try: + self.err = FDCapture(2, tmpfile=tmpfile, + now=False, patchsys=patchsys) + self._options['err'] = self.err.tmpfile + except OSError: + pass + + def startall(self): + if hasattr(self, 'in_'): + self.in_.start() + if hasattr(self, 'out'): + self.out.start() + if hasattr(self, 'err'): + self.err.start() + + def resume(self): + """ resume capturing with original temp files. """ + self.startall() + + def done(self, save=True): + """ return (outfile, errfile) and stop capturing. """ + outfile = errfile = None + if hasattr(self, 'out') and not self.out.tmpfile.closed: + outfile = self.out.done() + if hasattr(self, 'err') and not self.err.tmpfile.closed: + errfile = self.err.done() + if hasattr(self, 'in_'): + tmpfile = self.in_.done() + if save: + self._save() + return outfile, errfile + + def readouterr(self): + """ return snapshot value of stdout/stderr capturings. 
""" + if hasattr(self, "out"): + out = self._readsnapshot(self.out.tmpfile) + else: + out = "" + if hasattr(self, "err"): + err = self._readsnapshot(self.err.tmpfile) + else: + err = "" + return [out, err] + + def _readsnapshot(self, f): + f.seek(0) + res = f.read() + enc = getattr(f, "encoding", None) + if enc: + res = py.builtin._totext(res, enc, "replace") + f.truncate(0) + f.seek(0) + return res + + +class StdCapture(Capture): + """ This class allows to capture writes to sys.stdout|stderr "in-memory" + and will raise errors on tries to read from sys.stdin. It only + modifies sys.stdout|stderr|stdin attributes and does not + touch underlying File Descriptors (use StdCaptureFD for that). + """ + def __init__(self, out=True, err=True, in_=True, mixed=False, now=True): + self._oldout = sys.stdout + self._olderr = sys.stderr + self._oldin = sys.stdin + if out and not hasattr(out, 'file'): + out = TextIO() + self.out = out + if err: + if mixed: + err = out + elif not hasattr(err, 'write'): + err = TextIO() + self.err = err + self.in_ = in_ + if now: + self.startall() + + def startall(self): + if self.out: + sys.stdout = self.out + if self.err: + sys.stderr = self.err + if self.in_: + sys.stdin = self.in_ = DontReadFromInput() + + def done(self, save=True): + """ return (outfile, errfile) and stop capturing. """ + outfile = errfile = None + if self.out and not self.out.closed: + sys.stdout = self._oldout + outfile = self.out + outfile.seek(0) + if self.err and not self.err.closed: + sys.stderr = self._olderr + errfile = self.err + errfile.seek(0) + if self.in_: + sys.stdin = self._oldin + return outfile, errfile + + def resume(self): + """ resume capturing with original temp files. """ + self.startall() + + def readouterr(self): + """ return snapshot value of stdout/stderr capturings. 
""" + out = err = "" + if self.out: + out = self.out.getvalue() + self.out.truncate(0) + self.out.seek(0) + if self.err: + err = self.err.getvalue() + self.err.truncate(0) + self.err.seek(0) + return out, err + +class DontReadFromInput: + """Temporary stub class. Ideally when stdin is accessed, the + capturing should be turned off, with possibly all data captured + so far sent to the screen. This should be configurable, though, + because in automated test runs it is better to crash than + hang indefinitely. + """ + def read(self, *args): + raise IOError("reading from stdin while output is captured") + readline = read + readlines = read + __iter__ = read + + def fileno(self): + raise ValueError("redirected Stdin is pseudofile, has no fileno()") + def isatty(self): + return False + def close(self): + pass + +try: + devnullpath = os.devnull +except AttributeError: + if os.name == 'nt': + devnullpath = 'NUL' + else: + devnullpath = '/dev/null' diff --git a/third_party/python/py/py/_io/saferepr.py b/third_party/python/py/py/_io/saferepr.py index 8518290efddec..e86f18f424a80 100644 --- a/third_party/python/py/py/_io/saferepr.py +++ b/third_party/python/py/py/_io/saferepr.py @@ -1,71 +1,71 @@ -import py -import sys - -builtin_repr = repr - -reprlib = py.builtin._tryimport('repr', 'reprlib') - -class SafeRepr(reprlib.Repr): - """ subclass of repr.Repr that limits the resulting size of repr() - and includes information on exceptions raised during the call. 
- """ - def repr(self, x): - return self._callhelper(reprlib.Repr.repr, self, x) - - def repr_unicode(self, x, level): - # Strictly speaking wrong on narrow builds - def repr(u): - if "'" not in u: - return py.builtin._totext("'%s'") % u - elif '"' not in u: - return py.builtin._totext('"%s"') % u - else: - return py.builtin._totext("'%s'") % u.replace("'", r"\'") - s = repr(x[:self.maxstring]) - if len(s) > self.maxstring: - i = max(0, (self.maxstring-3)//2) - j = max(0, self.maxstring-3-i) - s = repr(x[:i] + x[len(x)-j:]) - s = s[:i] + '...' + s[len(s)-j:] - return s - - def repr_instance(self, x, level): - return self._callhelper(builtin_repr, x) - - def _callhelper(self, call, x, *args): - try: - # Try the vanilla repr and make sure that the result is a string - s = call(x, *args) - except py.builtin._sysex: - raise - except: - cls, e, tb = sys.exc_info() - exc_name = getattr(cls, '__name__', 'unknown') - try: - exc_info = str(e) - except py.builtin._sysex: - raise - except: - exc_info = 'unknown' - return '<[%s("%s") raised in repr()] %s object at 0x%x>' % ( - exc_name, exc_info, x.__class__.__name__, id(x)) - else: - if len(s) > self.maxsize: - i = max(0, (self.maxsize-3)//2) - j = max(0, self.maxsize-3-i) - s = s[:i] + '...' + s[len(s)-j:] - return s - -def saferepr(obj, maxsize=240): - """ return a size-limited safe repr-string for the given object. - Failing __repr__ functions of user instances will be represented - with a short exception info and 'saferepr' generally takes - care to never raise exceptions itself. This function is a wrapper - around the Repr/reprlib functionality of the standard 2.6 lib. 
- """ - # review exception handling - srepr = SafeRepr() - srepr.maxstring = maxsize - srepr.maxsize = maxsize - srepr.maxother = 160 - return srepr.repr(obj) +import py +import sys + +builtin_repr = repr + +reprlib = py.builtin._tryimport('repr', 'reprlib') + +class SafeRepr(reprlib.Repr): + """ subclass of repr.Repr that limits the resulting size of repr() + and includes information on exceptions raised during the call. + """ + def repr(self, x): + return self._callhelper(reprlib.Repr.repr, self, x) + + def repr_unicode(self, x, level): + # Strictly speaking wrong on narrow builds + def repr(u): + if "'" not in u: + return py.builtin._totext("'%s'") % u + elif '"' not in u: + return py.builtin._totext('"%s"') % u + else: + return py.builtin._totext("'%s'") % u.replace("'", r"\'") + s = repr(x[:self.maxstring]) + if len(s) > self.maxstring: + i = max(0, (self.maxstring-3)//2) + j = max(0, self.maxstring-3-i) + s = repr(x[:i] + x[len(x)-j:]) + s = s[:i] + '...' + s[len(s)-j:] + return s + + def repr_instance(self, x, level): + return self._callhelper(builtin_repr, x) + + def _callhelper(self, call, x, *args): + try: + # Try the vanilla repr and make sure that the result is a string + s = call(x, *args) + except py.builtin._sysex: + raise + except: + cls, e, tb = sys.exc_info() + exc_name = getattr(cls, '__name__', 'unknown') + try: + exc_info = str(e) + except py.builtin._sysex: + raise + except: + exc_info = 'unknown' + return '<[%s("%s") raised in repr()] %s object at 0x%x>' % ( + exc_name, exc_info, x.__class__.__name__, id(x)) + else: + if len(s) > self.maxsize: + i = max(0, (self.maxsize-3)//2) + j = max(0, self.maxsize-3-i) + s = s[:i] + '...' + s[len(s)-j:] + return s + +def saferepr(obj, maxsize=240): + """ return a size-limited safe repr-string for the given object. + Failing __repr__ functions of user instances will be represented + with a short exception info and 'saferepr' generally takes + care to never raise exceptions itself. 
This function is a wrapper + around the Repr/reprlib functionality of the standard 2.6 lib. + """ + # review exception handling + srepr = SafeRepr() + srepr.maxstring = maxsize + srepr.maxsize = maxsize + srepr.maxother = 160 + return srepr.repr(obj) diff --git a/third_party/python/py/py/_io/terminalwriter.py b/third_party/python/py/py/_io/terminalwriter.py index cef1ff5809795..715547d90bade 100644 --- a/third_party/python/py/py/_io/terminalwriter.py +++ b/third_party/python/py/py/_io/terminalwriter.py @@ -1,348 +1,357 @@ -""" - -Helper functions for writing to terminals and files. - -""" - - -import sys, os -import py -py3k = sys.version_info[0] >= 3 -from py.builtin import text, bytes - -win32_and_ctypes = False -colorama = None -if sys.platform == "win32": - try: - import colorama - except ImportError: - try: - import ctypes - win32_and_ctypes = True - except ImportError: - pass - - -def _getdimensions(): - import termios,fcntl,struct - call = fcntl.ioctl(1,termios.TIOCGWINSZ,"\000"*8) - height,width = struct.unpack( "hhhh", call ) [:2] - return height, width - - -def get_terminal_width(): - height = width = 0 - try: - height, width = _getdimensions() - except py.builtin._sysex: - raise - except: - # pass to fallback below - pass - - if width == 0: - # FALLBACK: - # * some exception happened - # * or this is emacs terminal which reports (0,0) - width = int(os.environ.get('COLUMNS', 80)) - - # XXX the windows getdimensions may be bogus, let's sanify a bit - if width < 40: - width = 80 - return width - -terminal_width = get_terminal_width() - -# XXX unify with _escaped func below -def ansi_print(text, esc, file=None, newline=True, flush=False): - if file is None: - file = sys.stderr - text = text.rstrip() - if esc and not isinstance(esc, tuple): - esc = (esc,) - if esc and sys.platform != "win32" and file.isatty(): - text = (''.join(['\x1b[%sm' % cod for cod in esc]) + - text + - '\x1b[0m') # ANSI color code "reset" - if newline: - text += '\n' - - if esc and 
win32_and_ctypes and file.isatty(): - if 1 in esc: - bold = True - esc = tuple([x for x in esc if x != 1]) - else: - bold = False - esctable = {() : FOREGROUND_WHITE, # normal - (31,): FOREGROUND_RED, # red - (32,): FOREGROUND_GREEN, # green - (33,): FOREGROUND_GREEN|FOREGROUND_RED, # yellow - (34,): FOREGROUND_BLUE, # blue - (35,): FOREGROUND_BLUE|FOREGROUND_RED, # purple - (36,): FOREGROUND_BLUE|FOREGROUND_GREEN, # cyan - (37,): FOREGROUND_WHITE, # white - (39,): FOREGROUND_WHITE, # reset - } - attr = esctable.get(esc, FOREGROUND_WHITE) - if bold: - attr |= FOREGROUND_INTENSITY - STD_OUTPUT_HANDLE = -11 - STD_ERROR_HANDLE = -12 - if file is sys.stderr: - handle = GetStdHandle(STD_ERROR_HANDLE) - else: - handle = GetStdHandle(STD_OUTPUT_HANDLE) - oldcolors = GetConsoleInfo(handle).wAttributes - attr |= (oldcolors & 0x0f0) - SetConsoleTextAttribute(handle, attr) - while len(text) > 32768: - file.write(text[:32768]) - text = text[32768:] - if text: - file.write(text) - SetConsoleTextAttribute(handle, oldcolors) - else: - file.write(text) - - if flush: - file.flush() - -def should_do_markup(file): - if os.environ.get('PY_COLORS') == '1': - return True - if os.environ.get('PY_COLORS') == '0': - return False - return hasattr(file, 'isatty') and file.isatty() \ - and os.environ.get('TERM') != 'dumb' \ - and not (sys.platform.startswith('java') and os._name == 'nt') - -class TerminalWriter(object): - _esctable = dict(black=30, red=31, green=32, yellow=33, - blue=34, purple=35, cyan=36, white=37, - Black=40, Red=41, Green=42, Yellow=43, - Blue=44, Purple=45, Cyan=46, White=47, - bold=1, light=2, blink=5, invert=7) - - # XXX deprecate stringio argument - def __init__(self, file=None, stringio=False, encoding=None): - if file is None: - if stringio: - self.stringio = file = py.io.TextIO() - else: - file = py.std.sys.stdout - elif py.builtin.callable(file) and not ( - hasattr(file, "write") and hasattr(file, "flush")): - file = WriteFile(file, encoding=encoding) - if 
hasattr(file, "isatty") and file.isatty() and colorama: - file = colorama.AnsiToWin32(file).stream - self.encoding = encoding or getattr(file, 'encoding', "utf-8") - self._file = file - self.fullwidth = get_terminal_width() - self.hasmarkup = should_do_markup(file) - self._lastlen = 0 - - def _escaped(self, text, esc): - if esc and self.hasmarkup: - text = (''.join(['\x1b[%sm' % cod for cod in esc]) + - text +'\x1b[0m') - return text - - def markup(self, text, **kw): - esc = [] - for name in kw: - if name not in self._esctable: - raise ValueError("unknown markup: %r" %(name,)) - if kw[name]: - esc.append(self._esctable[name]) - return self._escaped(text, tuple(esc)) - - def sep(self, sepchar, title=None, fullwidth=None, **kw): - if fullwidth is None: - fullwidth = self.fullwidth - # the goal is to have the line be as long as possible - # under the condition that len(line) <= fullwidth - if sys.platform == "win32": - # if we print in the last column on windows we are on a - # new line but there is no way to verify/neutralize this - # (we may not know the exact line width) - # so let's be defensive to avoid empty lines in the output - fullwidth -= 1 - if title is not None: - # we want 2 + 2*len(fill) + len(title) <= fullwidth - # i.e. 2 + 2*len(sepchar)*N + len(title) <= fullwidth - # 2*len(sepchar)*N <= fullwidth - len(title) - 2 - # N <= (fullwidth - len(title) - 2) // (2*len(sepchar)) - N = (fullwidth - len(title) - 2) // (2*len(sepchar)) - fill = sepchar * N - line = "%s %s %s" % (fill, title, fill) - else: - # we want len(sepchar)*N <= fullwidth - # i.e. 
N <= fullwidth // len(sepchar) - line = sepchar * (fullwidth // len(sepchar)) - # in some situations there is room for an extra sepchar at the right, - # in particular if we consider that with a sepchar like "_ " the - # trailing space is not important at the end of the line - if len(line) + len(sepchar.rstrip()) <= fullwidth: - line += sepchar.rstrip() - - self.line(line, **kw) - - def write(self, msg, **kw): - if msg: - if not isinstance(msg, (bytes, text)): - msg = text(msg) - if self.hasmarkup and kw: - markupmsg = self.markup(msg, **kw) - else: - markupmsg = msg - write_out(self._file, markupmsg) - - def line(self, s='', **kw): - self.write(s, **kw) - self._checkfill(s) - self.write('\n') - - def reline(self, line, **kw): - if not self.hasmarkup: - raise ValueError("cannot use rewrite-line without terminal") - self.write(line, **kw) - self._checkfill(line) - self.write('\r') - self._lastlen = len(line) - - def _checkfill(self, line): - diff2last = self._lastlen - len(line) - if diff2last > 0: - self.write(" " * diff2last) - -class Win32ConsoleWriter(TerminalWriter): - def write(self, msg, **kw): - if msg: - if not isinstance(msg, (bytes, text)): - msg = text(msg) - oldcolors = None - if self.hasmarkup and kw: - handle = GetStdHandle(STD_OUTPUT_HANDLE) - oldcolors = GetConsoleInfo(handle).wAttributes - default_bg = oldcolors & 0x00F0 - attr = default_bg - if kw.pop('bold', False): - attr |= FOREGROUND_INTENSITY - - if kw.pop('red', False): - attr |= FOREGROUND_RED - elif kw.pop('blue', False): - attr |= FOREGROUND_BLUE - elif kw.pop('green', False): - attr |= FOREGROUND_GREEN - elif kw.pop('yellow', False): - attr |= FOREGROUND_GREEN|FOREGROUND_RED - else: - attr |= oldcolors & 0x0007 - - SetConsoleTextAttribute(handle, attr) - write_out(self._file, msg) - if oldcolors: - SetConsoleTextAttribute(handle, oldcolors) - -class WriteFile(object): - def __init__(self, writemethod, encoding=None): - self.encoding = encoding - self._writemethod = writemethod - - def 
write(self, data): - if self.encoding: - data = data.encode(self.encoding, "replace") - self._writemethod(data) - - def flush(self): - return - - -if win32_and_ctypes: - TerminalWriter = Win32ConsoleWriter - import ctypes - from ctypes import wintypes - - # ctypes access to the Windows console - STD_OUTPUT_HANDLE = -11 - STD_ERROR_HANDLE = -12 - FOREGROUND_BLACK = 0x0000 # black text - FOREGROUND_BLUE = 0x0001 # text color contains blue. - FOREGROUND_GREEN = 0x0002 # text color contains green. - FOREGROUND_RED = 0x0004 # text color contains red. - FOREGROUND_WHITE = 0x0007 - FOREGROUND_INTENSITY = 0x0008 # text color is intensified. - BACKGROUND_BLACK = 0x0000 # background color black - BACKGROUND_BLUE = 0x0010 # background color contains blue. - BACKGROUND_GREEN = 0x0020 # background color contains green. - BACKGROUND_RED = 0x0040 # background color contains red. - BACKGROUND_WHITE = 0x0070 - BACKGROUND_INTENSITY = 0x0080 # background color is intensified. - - SHORT = ctypes.c_short - class COORD(ctypes.Structure): - _fields_ = [('X', SHORT), - ('Y', SHORT)] - class SMALL_RECT(ctypes.Structure): - _fields_ = [('Left', SHORT), - ('Top', SHORT), - ('Right', SHORT), - ('Bottom', SHORT)] - class CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure): - _fields_ = [('dwSize', COORD), - ('dwCursorPosition', COORD), - ('wAttributes', wintypes.WORD), - ('srWindow', SMALL_RECT), - ('dwMaximumWindowSize', COORD)] - - _GetStdHandle = ctypes.windll.kernel32.GetStdHandle - _GetStdHandle.argtypes = [wintypes.DWORD] - _GetStdHandle.restype = wintypes.HANDLE - def GetStdHandle(kind): - return _GetStdHandle(kind) - - SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute - SetConsoleTextAttribute.argtypes = [wintypes.HANDLE, wintypes.WORD] - SetConsoleTextAttribute.restype = wintypes.BOOL - - _GetConsoleScreenBufferInfo = \ - ctypes.windll.kernel32.GetConsoleScreenBufferInfo - _GetConsoleScreenBufferInfo.argtypes = [wintypes.HANDLE, - 
ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO)] - _GetConsoleScreenBufferInfo.restype = wintypes.BOOL - def GetConsoleInfo(handle): - info = CONSOLE_SCREEN_BUFFER_INFO() - _GetConsoleScreenBufferInfo(handle, ctypes.byref(info)) - return info - - def _getdimensions(): - handle = GetStdHandle(STD_OUTPUT_HANDLE) - info = GetConsoleInfo(handle) - # Substract one from the width, otherwise the cursor wraps - # and the ending \n causes an empty line to display. - return info.dwSize.Y, info.dwSize.X - 1 - -def write_out(fil, msg): - # XXX sometimes "msg" is of type bytes, sometimes text which - # complicates the situation. Should we try to enforce unicode? - try: - # on py27 and above writing out to sys.stdout with an encoding - # should usually work for unicode messages (if the encoding is - # capable of it) - fil.write(msg) - except UnicodeEncodeError: - # on py26 it might not work because stdout expects bytes - if fil.encoding: - try: - fil.write(msg.encode(fil.encoding)) - except UnicodeEncodeError: - # it might still fail if the encoding is not capable - pass - else: - fil.flush() - return - # fallback: escape all unicode characters - msg = msg.encode("unicode-escape").decode("ascii") - fil.write(msg) - fil.flush() +""" + +Helper functions for writing to terminals and files. 
+ +""" + + +import sys, os +import py +py3k = sys.version_info[0] >= 3 +from py.builtin import text, bytes + +win32_and_ctypes = False +colorama = None +if sys.platform == "win32": + try: + import colorama + except ImportError: + try: + import ctypes + win32_and_ctypes = True + except ImportError: + pass + + +def _getdimensions(): + import termios,fcntl,struct + call = fcntl.ioctl(1,termios.TIOCGWINSZ,"\000"*8) + height,width = struct.unpack( "hhhh", call ) [:2] + return height, width + + +def get_terminal_width(): + height = width = 0 + try: + height, width = _getdimensions() + except py.builtin._sysex: + raise + except: + # pass to fallback below + pass + + if width == 0: + # FALLBACK: + # * some exception happened + # * or this is emacs terminal which reports (0,0) + width = int(os.environ.get('COLUMNS', 80)) + + # XXX the windows getdimensions may be bogus, let's sanify a bit + if width < 40: + width = 80 + return width + +terminal_width = get_terminal_width() + +# XXX unify with _escaped func below +def ansi_print(text, esc, file=None, newline=True, flush=False): + if file is None: + file = sys.stderr + text = text.rstrip() + if esc and not isinstance(esc, tuple): + esc = (esc,) + if esc and sys.platform != "win32" and file.isatty(): + text = (''.join(['\x1b[%sm' % cod for cod in esc]) + + text + + '\x1b[0m') # ANSI color code "reset" + if newline: + text += '\n' + + if esc and win32_and_ctypes and file.isatty(): + if 1 in esc: + bold = True + esc = tuple([x for x in esc if x != 1]) + else: + bold = False + esctable = {() : FOREGROUND_WHITE, # normal + (31,): FOREGROUND_RED, # red + (32,): FOREGROUND_GREEN, # green + (33,): FOREGROUND_GREEN|FOREGROUND_RED, # yellow + (34,): FOREGROUND_BLUE, # blue + (35,): FOREGROUND_BLUE|FOREGROUND_RED, # purple + (36,): FOREGROUND_BLUE|FOREGROUND_GREEN, # cyan + (37,): FOREGROUND_WHITE, # white + (39,): FOREGROUND_WHITE, # reset + } + attr = esctable.get(esc, FOREGROUND_WHITE) + if bold: + attr |= FOREGROUND_INTENSITY + 
STD_OUTPUT_HANDLE = -11 + STD_ERROR_HANDLE = -12 + if file is sys.stderr: + handle = GetStdHandle(STD_ERROR_HANDLE) + else: + handle = GetStdHandle(STD_OUTPUT_HANDLE) + oldcolors = GetConsoleInfo(handle).wAttributes + attr |= (oldcolors & 0x0f0) + SetConsoleTextAttribute(handle, attr) + while len(text) > 32768: + file.write(text[:32768]) + text = text[32768:] + if text: + file.write(text) + SetConsoleTextAttribute(handle, oldcolors) + else: + file.write(text) + + if flush: + file.flush() + +def should_do_markup(file): + if os.environ.get('PY_COLORS') == '1': + return True + if os.environ.get('PY_COLORS') == '0': + return False + return hasattr(file, 'isatty') and file.isatty() \ + and os.environ.get('TERM') != 'dumb' \ + and not (sys.platform.startswith('java') and os._name == 'nt') + +class TerminalWriter(object): + _esctable = dict(black=30, red=31, green=32, yellow=33, + blue=34, purple=35, cyan=36, white=37, + Black=40, Red=41, Green=42, Yellow=43, + Blue=44, Purple=45, Cyan=46, White=47, + bold=1, light=2, blink=5, invert=7) + + # XXX deprecate stringio argument + def __init__(self, file=None, stringio=False, encoding=None): + if file is None: + if stringio: + self.stringio = file = py.io.TextIO() + else: + file = py.std.sys.stdout + elif py.builtin.callable(file) and not ( + hasattr(file, "write") and hasattr(file, "flush")): + file = WriteFile(file, encoding=encoding) + if hasattr(file, "isatty") and file.isatty() and colorama: + file = colorama.AnsiToWin32(file).stream + self.encoding = encoding or getattr(file, 'encoding', "utf-8") + self._file = file + self.hasmarkup = should_do_markup(file) + self._lastlen = 0 + + @property + def fullwidth(self): + if hasattr(self, '_terminal_width'): + return self._terminal_width + return get_terminal_width() + + @fullwidth.setter + def fullwidth(self, value): + self._terminal_width = value + + def _escaped(self, text, esc): + if esc and self.hasmarkup: + text = (''.join(['\x1b[%sm' % cod for cod in esc]) + + text 
+'\x1b[0m') + return text + + def markup(self, text, **kw): + esc = [] + for name in kw: + if name not in self._esctable: + raise ValueError("unknown markup: %r" %(name,)) + if kw[name]: + esc.append(self._esctable[name]) + return self._escaped(text, tuple(esc)) + + def sep(self, sepchar, title=None, fullwidth=None, **kw): + if fullwidth is None: + fullwidth = self.fullwidth + # the goal is to have the line be as long as possible + # under the condition that len(line) <= fullwidth + if sys.platform == "win32": + # if we print in the last column on windows we are on a + # new line but there is no way to verify/neutralize this + # (we may not know the exact line width) + # so let's be defensive to avoid empty lines in the output + fullwidth -= 1 + if title is not None: + # we want 2 + 2*len(fill) + len(title) <= fullwidth + # i.e. 2 + 2*len(sepchar)*N + len(title) <= fullwidth + # 2*len(sepchar)*N <= fullwidth - len(title) - 2 + # N <= (fullwidth - len(title) - 2) // (2*len(sepchar)) + N = (fullwidth - len(title) - 2) // (2*len(sepchar)) + fill = sepchar * N + line = "%s %s %s" % (fill, title, fill) + else: + # we want len(sepchar)*N <= fullwidth + # i.e. 
N <= fullwidth // len(sepchar) + line = sepchar * (fullwidth // len(sepchar)) + # in some situations there is room for an extra sepchar at the right, + # in particular if we consider that with a sepchar like "_ " the + # trailing space is not important at the end of the line + if len(line) + len(sepchar.rstrip()) <= fullwidth: + line += sepchar.rstrip() + + self.line(line, **kw) + + def write(self, msg, **kw): + if msg: + if not isinstance(msg, (bytes, text)): + msg = text(msg) + if self.hasmarkup and kw: + markupmsg = self.markup(msg, **kw) + else: + markupmsg = msg + write_out(self._file, markupmsg) + + def line(self, s='', **kw): + self.write(s, **kw) + self._checkfill(s) + self.write('\n') + + def reline(self, line, **kw): + if not self.hasmarkup: + raise ValueError("cannot use rewrite-line without terminal") + self.write(line, **kw) + self._checkfill(line) + self.write('\r') + self._lastlen = len(line) + + def _checkfill(self, line): + diff2last = self._lastlen - len(line) + if diff2last > 0: + self.write(" " * diff2last) + +class Win32ConsoleWriter(TerminalWriter): + def write(self, msg, **kw): + if msg: + if not isinstance(msg, (bytes, text)): + msg = text(msg) + oldcolors = None + if self.hasmarkup and kw: + handle = GetStdHandle(STD_OUTPUT_HANDLE) + oldcolors = GetConsoleInfo(handle).wAttributes + default_bg = oldcolors & 0x00F0 + attr = default_bg + if kw.pop('bold', False): + attr |= FOREGROUND_INTENSITY + + if kw.pop('red', False): + attr |= FOREGROUND_RED + elif kw.pop('blue', False): + attr |= FOREGROUND_BLUE + elif kw.pop('green', False): + attr |= FOREGROUND_GREEN + elif kw.pop('yellow', False): + attr |= FOREGROUND_GREEN|FOREGROUND_RED + else: + attr |= oldcolors & 0x0007 + + SetConsoleTextAttribute(handle, attr) + write_out(self._file, msg) + if oldcolors: + SetConsoleTextAttribute(handle, oldcolors) + +class WriteFile(object): + def __init__(self, writemethod, encoding=None): + self.encoding = encoding + self._writemethod = writemethod + + def 
write(self, data): + if self.encoding: + data = data.encode(self.encoding, "replace") + self._writemethod(data) + + def flush(self): + return + + +if win32_and_ctypes: + TerminalWriter = Win32ConsoleWriter + import ctypes + from ctypes import wintypes + + # ctypes access to the Windows console + STD_OUTPUT_HANDLE = -11 + STD_ERROR_HANDLE = -12 + FOREGROUND_BLACK = 0x0000 # black text + FOREGROUND_BLUE = 0x0001 # text color contains blue. + FOREGROUND_GREEN = 0x0002 # text color contains green. + FOREGROUND_RED = 0x0004 # text color contains red. + FOREGROUND_WHITE = 0x0007 + FOREGROUND_INTENSITY = 0x0008 # text color is intensified. + BACKGROUND_BLACK = 0x0000 # background color black + BACKGROUND_BLUE = 0x0010 # background color contains blue. + BACKGROUND_GREEN = 0x0020 # background color contains green. + BACKGROUND_RED = 0x0040 # background color contains red. + BACKGROUND_WHITE = 0x0070 + BACKGROUND_INTENSITY = 0x0080 # background color is intensified. + + SHORT = ctypes.c_short + class COORD(ctypes.Structure): + _fields_ = [('X', SHORT), + ('Y', SHORT)] + class SMALL_RECT(ctypes.Structure): + _fields_ = [('Left', SHORT), + ('Top', SHORT), + ('Right', SHORT), + ('Bottom', SHORT)] + class CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure): + _fields_ = [('dwSize', COORD), + ('dwCursorPosition', COORD), + ('wAttributes', wintypes.WORD), + ('srWindow', SMALL_RECT), + ('dwMaximumWindowSize', COORD)] + + _GetStdHandle = ctypes.windll.kernel32.GetStdHandle + _GetStdHandle.argtypes = [wintypes.DWORD] + _GetStdHandle.restype = wintypes.HANDLE + def GetStdHandle(kind): + return _GetStdHandle(kind) + + SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute + SetConsoleTextAttribute.argtypes = [wintypes.HANDLE, wintypes.WORD] + SetConsoleTextAttribute.restype = wintypes.BOOL + + _GetConsoleScreenBufferInfo = \ + ctypes.windll.kernel32.GetConsoleScreenBufferInfo + _GetConsoleScreenBufferInfo.argtypes = [wintypes.HANDLE, + 
ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO)] + _GetConsoleScreenBufferInfo.restype = wintypes.BOOL + def GetConsoleInfo(handle): + info = CONSOLE_SCREEN_BUFFER_INFO() + _GetConsoleScreenBufferInfo(handle, ctypes.byref(info)) + return info + + def _getdimensions(): + handle = GetStdHandle(STD_OUTPUT_HANDLE) + info = GetConsoleInfo(handle) + # Substract one from the width, otherwise the cursor wraps + # and the ending \n causes an empty line to display. + return info.dwSize.Y, info.dwSize.X - 1 + +def write_out(fil, msg): + # XXX sometimes "msg" is of type bytes, sometimes text which + # complicates the situation. Should we try to enforce unicode? + try: + # on py27 and above writing out to sys.stdout with an encoding + # should usually work for unicode messages (if the encoding is + # capable of it) + fil.write(msg) + except UnicodeEncodeError: + # on py26 it might not work because stdout expects bytes + if fil.encoding: + try: + fil.write(msg.encode(fil.encoding)) + except UnicodeEncodeError: + # it might still fail if the encoding is not capable + pass + else: + fil.flush() + return + # fallback: escape all unicode characters + msg = msg.encode("unicode-escape").decode("ascii") + fil.write(msg) + fil.flush() diff --git a/third_party/python/py/py/_log/__init__.py b/third_party/python/py/py/_log/__init__.py index fad62e960d4fc..b211e2dece4c5 100644 --- a/third_party/python/py/py/_log/__init__.py +++ b/third_party/python/py/py/_log/__init__.py @@ -1,2 +1,2 @@ -""" logging API ('producers' and 'consumers' connected via keywords) """ - +""" logging API ('producers' and 'consumers' connected via keywords) """ + diff --git a/third_party/python/py/py/_log/log.py b/third_party/python/py/py/_log/log.py index ce47e8c754ad6..86205fe9ef8a0 100644 --- a/third_party/python/py/py/_log/log.py +++ b/third_party/python/py/py/_log/log.py @@ -1,186 +1,186 @@ -""" -basic logging functionality based on a producer/consumer scheme. - -XXX implement this API: (maybe put it into slogger.py?) 
- - log = Logger( - info=py.log.STDOUT, - debug=py.log.STDOUT, - command=None) - log.info("hello", "world") - log.command("hello", "world") - - log = Logger(info=Logger(something=...), - debug=py.log.STDOUT, - command=None) -""" -import py, sys - -class Message(object): - def __init__(self, keywords, args): - self.keywords = keywords - self.args = args - - def content(self): - return " ".join(map(str, self.args)) - - def prefix(self): - return "[%s] " % (":".join(self.keywords)) - - def __str__(self): - return self.prefix() + self.content() - - -class Producer(object): - """ (deprecated) Log producer API which sends messages to be logged - to a 'consumer' object, which then prints them to stdout, - stderr, files, etc. Used extensively by PyPy-1.1. - """ - - Message = Message # to allow later customization - keywords2consumer = {} - - def __init__(self, keywords, keywordmapper=None, **kw): - if hasattr(keywords, 'split'): - keywords = tuple(keywords.split()) - self._keywords = keywords - if keywordmapper is None: - keywordmapper = default_keywordmapper - self._keywordmapper = keywordmapper - - def __repr__(self): - return "" % ":".join(self._keywords) - - def __getattr__(self, name): - if '_' in name: - raise AttributeError(name) - producer = self.__class__(self._keywords + (name,)) - setattr(self, name, producer) - return producer - - def __call__(self, *args): - """ write a message to the appropriate consumer(s) """ - func = self._keywordmapper.getconsumer(self._keywords) - if func is not None: - func(self.Message(self._keywords, args)) - -class KeywordMapper: - def __init__(self): - self.keywords2consumer = {} - - def getstate(self): - return self.keywords2consumer.copy() - def setstate(self, state): - self.keywords2consumer.clear() - self.keywords2consumer.update(state) - - def getconsumer(self, keywords): - """ return a consumer matching the given keywords. 
- - tries to find the most suitable consumer by walking, starting from - the back, the list of keywords, the first consumer matching a - keyword is returned (falling back to py.log.default) - """ - for i in range(len(keywords), 0, -1): - try: - return self.keywords2consumer[keywords[:i]] - except KeyError: - continue - return self.keywords2consumer.get('default', default_consumer) - - def setconsumer(self, keywords, consumer): - """ set a consumer for a set of keywords. """ - # normalize to tuples - if isinstance(keywords, str): - keywords = tuple(filter(None, keywords.split())) - elif hasattr(keywords, '_keywords'): - keywords = keywords._keywords - elif not isinstance(keywords, tuple): - raise TypeError("key %r is not a string or tuple" % (keywords,)) - if consumer is not None and not py.builtin.callable(consumer): - if not hasattr(consumer, 'write'): - raise TypeError( - "%r should be None, callable or file-like" % (consumer,)) - consumer = File(consumer) - self.keywords2consumer[keywords] = consumer - -def default_consumer(msg): - """ the default consumer, prints the message to stdout (using 'print') """ - sys.stderr.write(str(msg)+"\n") - -default_keywordmapper = KeywordMapper() - -def setconsumer(keywords, consumer): - default_keywordmapper.setconsumer(keywords, consumer) - -def setstate(state): - default_keywordmapper.setstate(state) -def getstate(): - return default_keywordmapper.getstate() - -# -# Consumers -# - -class File(object): - """ log consumer wrapping a file(-like) object """ - def __init__(self, f): - assert hasattr(f, 'write') - #assert isinstance(f, file) or not hasattr(f, 'open') - self._file = f - - def __call__(self, msg): - """ write a message to the log """ - self._file.write(str(msg) + "\n") - if hasattr(self._file, 'flush'): - self._file.flush() - -class Path(object): - """ log consumer that opens and writes to a Path """ - def __init__(self, filename, append=False, - delayed_create=False, buffering=False): - self._append = append - 
self._filename = str(filename) - self._buffering = buffering - if not delayed_create: - self._openfile() - - def _openfile(self): - mode = self._append and 'a' or 'w' - f = open(self._filename, mode) - self._file = f - - def __call__(self, msg): - """ write a message to the log """ - if not hasattr(self, "_file"): - self._openfile() - self._file.write(str(msg) + "\n") - if not self._buffering: - self._file.flush() - -def STDOUT(msg): - """ consumer that writes to sys.stdout """ - sys.stdout.write(str(msg)+"\n") - -def STDERR(msg): - """ consumer that writes to sys.stderr """ - sys.stderr.write(str(msg)+"\n") - -class Syslog: - """ consumer that writes to the syslog daemon """ - - def __init__(self, priority = None): - if priority is None: - priority = self.LOG_INFO - self.priority = priority - - def __call__(self, msg): - """ write a message to the log """ - py.std.syslog.syslog(self.priority, str(msg)) - -for _prio in "EMERG ALERT CRIT ERR WARNING NOTICE INFO DEBUG".split(): - _prio = "LOG_" + _prio - try: - setattr(Syslog, _prio, getattr(py.std.syslog, _prio)) - except AttributeError: - pass +""" +basic logging functionality based on a producer/consumer scheme. + +XXX implement this API: (maybe put it into slogger.py?) + + log = Logger( + info=py.log.STDOUT, + debug=py.log.STDOUT, + command=None) + log.info("hello", "world") + log.command("hello", "world") + + log = Logger(info=Logger(something=...), + debug=py.log.STDOUT, + command=None) +""" +import py, sys + +class Message(object): + def __init__(self, keywords, args): + self.keywords = keywords + self.args = args + + def content(self): + return " ".join(map(str, self.args)) + + def prefix(self): + return "[%s] " % (":".join(self.keywords)) + + def __str__(self): + return self.prefix() + self.content() + + +class Producer(object): + """ (deprecated) Log producer API which sends messages to be logged + to a 'consumer' object, which then prints them to stdout, + stderr, files, etc. Used extensively by PyPy-1.1. 
+ """ + + Message = Message # to allow later customization + keywords2consumer = {} + + def __init__(self, keywords, keywordmapper=None, **kw): + if hasattr(keywords, 'split'): + keywords = tuple(keywords.split()) + self._keywords = keywords + if keywordmapper is None: + keywordmapper = default_keywordmapper + self._keywordmapper = keywordmapper + + def __repr__(self): + return "" % ":".join(self._keywords) + + def __getattr__(self, name): + if '_' in name: + raise AttributeError(name) + producer = self.__class__(self._keywords + (name,)) + setattr(self, name, producer) + return producer + + def __call__(self, *args): + """ write a message to the appropriate consumer(s) """ + func = self._keywordmapper.getconsumer(self._keywords) + if func is not None: + func(self.Message(self._keywords, args)) + +class KeywordMapper: + def __init__(self): + self.keywords2consumer = {} + + def getstate(self): + return self.keywords2consumer.copy() + def setstate(self, state): + self.keywords2consumer.clear() + self.keywords2consumer.update(state) + + def getconsumer(self, keywords): + """ return a consumer matching the given keywords. + + tries to find the most suitable consumer by walking, starting from + the back, the list of keywords, the first consumer matching a + keyword is returned (falling back to py.log.default) + """ + for i in range(len(keywords), 0, -1): + try: + return self.keywords2consumer[keywords[:i]] + except KeyError: + continue + return self.keywords2consumer.get('default', default_consumer) + + def setconsumer(self, keywords, consumer): + """ set a consumer for a set of keywords. 
""" + # normalize to tuples + if isinstance(keywords, str): + keywords = tuple(filter(None, keywords.split())) + elif hasattr(keywords, '_keywords'): + keywords = keywords._keywords + elif not isinstance(keywords, tuple): + raise TypeError("key %r is not a string or tuple" % (keywords,)) + if consumer is not None and not py.builtin.callable(consumer): + if not hasattr(consumer, 'write'): + raise TypeError( + "%r should be None, callable or file-like" % (consumer,)) + consumer = File(consumer) + self.keywords2consumer[keywords] = consumer + +def default_consumer(msg): + """ the default consumer, prints the message to stdout (using 'print') """ + sys.stderr.write(str(msg)+"\n") + +default_keywordmapper = KeywordMapper() + +def setconsumer(keywords, consumer): + default_keywordmapper.setconsumer(keywords, consumer) + +def setstate(state): + default_keywordmapper.setstate(state) +def getstate(): + return default_keywordmapper.getstate() + +# +# Consumers +# + +class File(object): + """ log consumer wrapping a file(-like) object """ + def __init__(self, f): + assert hasattr(f, 'write') + #assert isinstance(f, file) or not hasattr(f, 'open') + self._file = f + + def __call__(self, msg): + """ write a message to the log """ + self._file.write(str(msg) + "\n") + if hasattr(self._file, 'flush'): + self._file.flush() + +class Path(object): + """ log consumer that opens and writes to a Path """ + def __init__(self, filename, append=False, + delayed_create=False, buffering=False): + self._append = append + self._filename = str(filename) + self._buffering = buffering + if not delayed_create: + self._openfile() + + def _openfile(self): + mode = self._append and 'a' or 'w' + f = open(self._filename, mode) + self._file = f + + def __call__(self, msg): + """ write a message to the log """ + if not hasattr(self, "_file"): + self._openfile() + self._file.write(str(msg) + "\n") + if not self._buffering: + self._file.flush() + +def STDOUT(msg): + """ consumer that writes to sys.stdout 
""" + sys.stdout.write(str(msg)+"\n") + +def STDERR(msg): + """ consumer that writes to sys.stderr """ + sys.stderr.write(str(msg)+"\n") + +class Syslog: + """ consumer that writes to the syslog daemon """ + + def __init__(self, priority = None): + if priority is None: + priority = self.LOG_INFO + self.priority = priority + + def __call__(self, msg): + """ write a message to the log """ + py.std.syslog.syslog(self.priority, str(msg)) + +for _prio in "EMERG ALERT CRIT ERR WARNING NOTICE INFO DEBUG".split(): + _prio = "LOG_" + _prio + try: + setattr(Syslog, _prio, getattr(py.std.syslog, _prio)) + except AttributeError: + pass diff --git a/third_party/python/py/py/_log/warning.py b/third_party/python/py/py/_log/warning.py index 722e31e910dad..a137fe81ad0a5 100644 --- a/third_party/python/py/py/_log/warning.py +++ b/third_party/python/py/py/_log/warning.py @@ -1,76 +1,76 @@ -import py, sys - -class DeprecationWarning(DeprecationWarning): - def __init__(self, msg, path, lineno): - self.msg = msg - self.path = path - self.lineno = lineno - def __repr__(self): - return "%s:%d: %s" %(self.path, self.lineno+1, self.msg) - def __str__(self): - return self.msg - -def _apiwarn(startversion, msg, stacklevel=2, function=None): - # below is mostly COPIED from python2.4/warnings.py's def warn() - # Get context information - if isinstance(stacklevel, str): - frame = sys._getframe(1) - level = 1 - found = frame.f_code.co_filename.find(stacklevel) != -1 - while frame: - co = frame.f_code - if co.co_filename.find(stacklevel) == -1: - if found: - stacklevel = level - break - else: - found = True - level += 1 - frame = frame.f_back - else: - stacklevel = 1 - msg = "%s (since version %s)" %(msg, startversion) - warn(msg, stacklevel=stacklevel+1, function=function) - -def warn(msg, stacklevel=1, function=None): - if function is not None: - filename = py.std.inspect.getfile(function) - lineno = py.code.getrawcode(function).co_firstlineno - else: - try: - caller = sys._getframe(stacklevel) 
- except ValueError: - globals = sys.__dict__ - lineno = 1 - else: - globals = caller.f_globals - lineno = caller.f_lineno - if '__name__' in globals: - module = globals['__name__'] - else: - module = "" - filename = globals.get('__file__') - if filename: - fnl = filename.lower() - if fnl.endswith(".pyc") or fnl.endswith(".pyo"): - filename = filename[:-1] - elif fnl.endswith("$py.class"): - filename = filename.replace('$py.class', '.py') - else: - if module == "__main__": - try: - filename = sys.argv[0] - except AttributeError: - # embedded interpreters don't have sys.argv, see bug #839151 - filename = '__main__' - if not filename: - filename = module - path = py.path.local(filename) - warning = DeprecationWarning(msg, path, lineno) - py.std.warnings.warn_explicit(warning, category=Warning, - filename=str(warning.path), - lineno=warning.lineno, - registry=py.std.warnings.__dict__.setdefault( - "__warningsregistry__", {}) - ) - +import py, sys + +class DeprecationWarning(DeprecationWarning): + def __init__(self, msg, path, lineno): + self.msg = msg + self.path = path + self.lineno = lineno + def __repr__(self): + return "%s:%d: %s" %(self.path, self.lineno+1, self.msg) + def __str__(self): + return self.msg + +def _apiwarn(startversion, msg, stacklevel=2, function=None): + # below is mostly COPIED from python2.4/warnings.py's def warn() + # Get context information + if isinstance(stacklevel, str): + frame = sys._getframe(1) + level = 1 + found = frame.f_code.co_filename.find(stacklevel) != -1 + while frame: + co = frame.f_code + if co.co_filename.find(stacklevel) == -1: + if found: + stacklevel = level + break + else: + found = True + level += 1 + frame = frame.f_back + else: + stacklevel = 1 + msg = "%s (since version %s)" %(msg, startversion) + warn(msg, stacklevel=stacklevel+1, function=function) + +def warn(msg, stacklevel=1, function=None): + if function is not None: + filename = py.std.inspect.getfile(function) + lineno = 
py.code.getrawcode(function).co_firstlineno + else: + try: + caller = sys._getframe(stacklevel) + except ValueError: + globals = sys.__dict__ + lineno = 1 + else: + globals = caller.f_globals + lineno = caller.f_lineno + if '__name__' in globals: + module = globals['__name__'] + else: + module = "" + filename = globals.get('__file__') + if filename: + fnl = filename.lower() + if fnl.endswith(".pyc") or fnl.endswith(".pyo"): + filename = filename[:-1] + elif fnl.endswith("$py.class"): + filename = filename.replace('$py.class', '.py') + else: + if module == "__main__": + try: + filename = sys.argv[0] + except AttributeError: + # embedded interpreters don't have sys.argv, see bug #839151 + filename = '__main__' + if not filename: + filename = module + path = py.path.local(filename) + warning = DeprecationWarning(msg, path, lineno) + py.std.warnings.warn_explicit(warning, category=Warning, + filename=str(warning.path), + lineno=warning.lineno, + registry=py.std.warnings.__dict__.setdefault( + "__warningsregistry__", {}) + ) + diff --git a/third_party/python/py/py/_path/__init__.py b/third_party/python/py/py/_path/__init__.py index 51f3246f8070f..7b1ea8a31fe40 100644 --- a/third_party/python/py/py/_path/__init__.py +++ b/third_party/python/py/py/_path/__init__.py @@ -1 +1 @@ -""" unified file system api """ +""" unified file system api """ diff --git a/third_party/python/py/py/_path/cacheutil.py b/third_party/python/py/py/_path/cacheutil.py index 99225047502c6..89ea90b91a437 100644 --- a/third_party/python/py/py/_path/cacheutil.py +++ b/third_party/python/py/py/_path/cacheutil.py @@ -1,114 +1,114 @@ -""" -This module contains multithread-safe cache implementations. - -All Caches have - - getorbuild(key, builder) - delentry(key) - -methods and allow configuration when instantiating the cache class. 
-""" -from time import time as gettime - -class BasicCache(object): - def __init__(self, maxentries=128): - self.maxentries = maxentries - self.prunenum = int(maxentries - maxentries/8) - self._dict = {} - - def clear(self): - self._dict.clear() - - def _getentry(self, key): - return self._dict[key] - - def _putentry(self, key, entry): - self._prunelowestweight() - self._dict[key] = entry - - def delentry(self, key, raising=False): - try: - del self._dict[key] - except KeyError: - if raising: - raise - - def getorbuild(self, key, builder): - try: - entry = self._getentry(key) - except KeyError: - entry = self._build(key, builder) - self._putentry(key, entry) - return entry.value - - def _prunelowestweight(self): - """ prune out entries with lowest weight. """ - numentries = len(self._dict) - if numentries >= self.maxentries: - # evict according to entry's weight - items = [(entry.weight, key) - for key, entry in self._dict.items()] - items.sort() - index = numentries - self.prunenum - if index > 0: - for weight, key in items[:index]: - # in MT situations the element might be gone - self.delentry(key, raising=False) - -class BuildcostAccessCache(BasicCache): - """ A BuildTime/Access-counting cache implementation. - the weight of a value is computed as the product of - - num-accesses-of-a-value * time-to-build-the-value - - The values with the least such weights are evicted - if the cache maxentries threshold is superceded. - For implementation flexibility more than one object - might be evicted at a time. 
- """ - # time function to use for measuring build-times - - def _build(self, key, builder): - start = gettime() - val = builder() - end = gettime() - return WeightedCountingEntry(val, end-start) - - -class WeightedCountingEntry(object): - def __init__(self, value, oneweight): - self._value = value - self.weight = self._oneweight = oneweight - - def value(self): - self.weight += self._oneweight - return self._value - value = property(value) - -class AgingCache(BasicCache): - """ This cache prunes out cache entries that are too old. - """ - def __init__(self, maxentries=128, maxseconds=10.0): - super(AgingCache, self).__init__(maxentries) - self.maxseconds = maxseconds - - def _getentry(self, key): - entry = self._dict[key] - if entry.isexpired(): - self.delentry(key) - raise KeyError(key) - return entry - - def _build(self, key, builder): - val = builder() - entry = AgingEntry(val, gettime() + self.maxseconds) - return entry - -class AgingEntry(object): - def __init__(self, value, expirationtime): - self.value = value - self.weight = expirationtime - - def isexpired(self): - t = gettime() - return t >= self.weight +""" +This module contains multithread-safe cache implementations. + +All Caches have + + getorbuild(key, builder) + delentry(key) + +methods and allow configuration when instantiating the cache class. 
+""" +from time import time as gettime + +class BasicCache(object): + def __init__(self, maxentries=128): + self.maxentries = maxentries + self.prunenum = int(maxentries - maxentries/8) + self._dict = {} + + def clear(self): + self._dict.clear() + + def _getentry(self, key): + return self._dict[key] + + def _putentry(self, key, entry): + self._prunelowestweight() + self._dict[key] = entry + + def delentry(self, key, raising=False): + try: + del self._dict[key] + except KeyError: + if raising: + raise + + def getorbuild(self, key, builder): + try: + entry = self._getentry(key) + except KeyError: + entry = self._build(key, builder) + self._putentry(key, entry) + return entry.value + + def _prunelowestweight(self): + """ prune out entries with lowest weight. """ + numentries = len(self._dict) + if numentries >= self.maxentries: + # evict according to entry's weight + items = [(entry.weight, key) + for key, entry in self._dict.items()] + items.sort() + index = numentries - self.prunenum + if index > 0: + for weight, key in items[:index]: + # in MT situations the element might be gone + self.delentry(key, raising=False) + +class BuildcostAccessCache(BasicCache): + """ A BuildTime/Access-counting cache implementation. + the weight of a value is computed as the product of + + num-accesses-of-a-value * time-to-build-the-value + + The values with the least such weights are evicted + if the cache maxentries threshold is superceded. + For implementation flexibility more than one object + might be evicted at a time. 
+ """ + # time function to use for measuring build-times + + def _build(self, key, builder): + start = gettime() + val = builder() + end = gettime() + return WeightedCountingEntry(val, end-start) + + +class WeightedCountingEntry(object): + def __init__(self, value, oneweight): + self._value = value + self.weight = self._oneweight = oneweight + + def value(self): + self.weight += self._oneweight + return self._value + value = property(value) + +class AgingCache(BasicCache): + """ This cache prunes out cache entries that are too old. + """ + def __init__(self, maxentries=128, maxseconds=10.0): + super(AgingCache, self).__init__(maxentries) + self.maxseconds = maxseconds + + def _getentry(self, key): + entry = self._dict[key] + if entry.isexpired(): + self.delentry(key) + raise KeyError(key) + return entry + + def _build(self, key, builder): + val = builder() + entry = AgingEntry(val, gettime() + self.maxseconds) + return entry + +class AgingEntry(object): + def __init__(self, value, expirationtime): + self.value = value + self.weight = expirationtime + + def isexpired(self): + t = gettime() + return t >= self.weight diff --git a/third_party/python/py/py/_path/common.py b/third_party/python/py/py/_path/common.py index d407434cb2abf..4bc292c0f6b05 100644 --- a/third_party/python/py/py/_path/common.py +++ b/third_party/python/py/py/_path/common.py @@ -1,403 +1,445 @@ -""" -""" -import os, sys, posixpath -import py - -# Moved from local.py. -iswin32 = sys.platform == "win32" or (getattr(os, '_name', False) == 'nt') - -class Checkers: - _depend_on_existence = 'exists', 'link', 'dir', 'file' - - def __init__(self, path): - self.path = path - - def dir(self): - raise NotImplementedError - - def file(self): - raise NotImplementedError - - def dotfile(self): - return self.path.basename.startswith('.') - - def ext(self, arg): - if not arg.startswith('.'): - arg = '.' 
+ arg - return self.path.ext == arg - - def exists(self): - raise NotImplementedError - - def basename(self, arg): - return self.path.basename == arg - - def basestarts(self, arg): - return self.path.basename.startswith(arg) - - def relto(self, arg): - return self.path.relto(arg) - - def fnmatch(self, arg): - return self.path.fnmatch(arg) - - def endswith(self, arg): - return str(self.path).endswith(arg) - - def _evaluate(self, kw): - for name, value in kw.items(): - invert = False - meth = None - try: - meth = getattr(self, name) - except AttributeError: - if name[:3] == 'not': - invert = True - try: - meth = getattr(self, name[3:]) - except AttributeError: - pass - if meth is None: - raise TypeError( - "no %r checker available for %r" % (name, self.path)) - try: - if py.code.getrawcode(meth).co_argcount > 1: - if (not meth(value)) ^ invert: - return False - else: - if bool(value) ^ bool(meth()) ^ invert: - return False - except (py.error.ENOENT, py.error.ENOTDIR, py.error.EBUSY): - # EBUSY feels not entirely correct, - # but its kind of necessary since ENOMEDIUM - # is not accessible in python - for name in self._depend_on_existence: - if name in kw: - if kw.get(name): - return False - name = 'not' + name - if name in kw: - if not kw.get(name): - return False - return True - -class NeverRaised(Exception): - pass - -class PathBase(object): - """ shared implementation for filesystem path objects.""" - Checkers = Checkers - - def __div__(self, other): - return self.join(str(other)) - __truediv__ = __div__ # py3k - - def basename(self): - """ basename part of path. """ - return self._getbyspec('basename')[0] - basename = property(basename, None, None, basename.__doc__) - - def dirname(self): - """ dirname part of path. 
""" - return self._getbyspec('dirname')[0] - dirname = property(dirname, None, None, dirname.__doc__) - - def purebasename(self): - """ pure base name of the path.""" - return self._getbyspec('purebasename')[0] - purebasename = property(purebasename, None, None, purebasename.__doc__) - - def ext(self): - """ extension of the path (including the '.').""" - return self._getbyspec('ext')[0] - ext = property(ext, None, None, ext.__doc__) - - def dirpath(self, *args, **kwargs): - """ return the directory path joined with any given path arguments. """ - return self.new(basename='').join(*args, **kwargs) - - def read_binary(self): - """ read and return a bytestring from reading the path. """ - with self.open('rb') as f: - return f.read() - - def read_text(self, encoding): - """ read and return a Unicode string from reading the path. """ - with self.open("r", encoding=encoding) as f: - return f.read() - - - def read(self, mode='r'): - """ read and return a bytestring from reading the path. """ - with self.open(mode) as f: - return f.read() - - def readlines(self, cr=1): - """ read and return a list of lines from the path. if cr is False, the -newline will be removed from the end of each line. """ - if not cr: - content = self.read('rU') - return content.split('\n') - else: - f = self.open('rU') - try: - return f.readlines() - finally: - f.close() - - def load(self): - """ (deprecated) return object unpickled from self.read() """ - f = self.open('rb') - try: - return py.error.checked_call(py.std.pickle.load, f) - finally: - f.close() - - def move(self, target): - """ move this path to target. """ - if target.relto(self): - raise py.error.EINVAL(target, - "cannot move path into a subdirectory of itself") - try: - self.rename(target) - except py.error.EXDEV: # invalid cross-device link - self.copy(target) - self.remove() - - def __repr__(self): - """ return a string representation of this path. 
""" - return repr(str(self)) - - def check(self, **kw): - """ check a path for existence and properties. - - Without arguments, return True if the path exists, otherwise False. - - valid checkers:: - - file=1 # is a file - file=0 # is not a file (may not even exist) - dir=1 # is a dir - link=1 # is a link - exists=1 # exists - - You can specify multiple checker definitions, for example:: - - path.check(file=1, link=1) # a link pointing to a file - """ - if not kw: - kw = {'exists' : 1} - return self.Checkers(self)._evaluate(kw) - - def fnmatch(self, pattern): - """return true if the basename/fullname matches the glob-'pattern'. - - valid pattern characters:: - - * matches everything - ? matches any single character - [seq] matches any character in seq - [!seq] matches any char not in seq - - If the pattern contains a path-separator then the full path - is used for pattern matching and a '*' is prepended to the - pattern. - - if the pattern doesn't contain a path-separator the pattern - is only matched against the basename. - """ - return FNMatcher(pattern)(self) - - def relto(self, relpath): - """ return a string which is the relative part of the path - to the given 'relpath'. - """ - if not isinstance(relpath, (str, PathBase)): - raise TypeError("%r: not a string or path object" %(relpath,)) - strrelpath = str(relpath) - if strrelpath and strrelpath[-1] != self.sep: - strrelpath += self.sep - #assert strrelpath[-1] == self.sep - #assert strrelpath[-2] != self.sep - strself = self.strpath - if sys.platform == "win32" or getattr(os, '_name', None) == 'nt': - if os.path.normcase(strself).startswith( - os.path.normcase(strrelpath)): - return strself[len(strrelpath):] - elif strself.startswith(strrelpath): - return strself[len(strrelpath):] - return "" - - def ensure_dir(self, *args): - """ ensure the path joined with args is a directory. 
""" - return self.ensure(*args, **{"dir": True}) - - def bestrelpath(self, dest): - """ return a string which is a relative path from self - (assumed to be a directory) to dest such that - self.join(bestrelpath) == dest and if not such - path can be determined return dest. - """ - try: - if self == dest: - return os.curdir - base = self.common(dest) - if not base: # can be the case on windows - return str(dest) - self2base = self.relto(base) - reldest = dest.relto(base) - if self2base: - n = self2base.count(self.sep) + 1 - else: - n = 0 - l = [os.pardir] * n - if reldest: - l.append(reldest) - target = dest.sep.join(l) - return target - except AttributeError: - return str(dest) - - def exists(self): - return self.check() - - def isdir(self): - return self.check(dir=1) - - def isfile(self): - return self.check(file=1) - - def parts(self, reverse=False): - """ return a root-first list of all ancestor directories - plus the path itself. - """ - current = self - l = [self] - while 1: - last = current - current = current.dirpath() - if last == current: - break - l.append(current) - if not reverse: - l.reverse() - return l - - def common(self, other): - """ return the common part shared with the other path - or None if there is no common part. - """ - last = None - for x, y in zip(self.parts(), other.parts()): - if x != y: - return last - last = x - return last - - def __add__(self, other): - """ return new path object with 'other' added to the basename""" - return self.new(basename=self.basename+str(other)) - - def __cmp__(self, other): - """ return sort value (-1, 0, +1). 
""" - try: - return cmp(self.strpath, other.strpath) - except AttributeError: - return cmp(str(self), str(other)) # self.path, other.path) - - def __lt__(self, other): - try: - return self.strpath < other.strpath - except AttributeError: - return str(self) < str(other) - - def visit(self, fil=None, rec=None, ignore=NeverRaised, bf=False, sort=False): - """ yields all paths below the current one - - fil is a filter (glob pattern or callable), if not matching the - path will not be yielded, defaulting to None (everything is - returned) - - rec is a filter (glob pattern or callable) that controls whether - a node is descended, defaulting to None - - ignore is an Exception class that is ignoredwhen calling dirlist() - on any of the paths (by default, all exceptions are reported) - - bf if True will cause a breadthfirst search instead of the - default depthfirst. Default: False - - sort if True will sort entries within each directory level. - """ - for x in Visitor(fil, rec, ignore, bf, sort).gen(self): - yield x - - def _sortlist(self, res, sort): - if sort: - if hasattr(sort, '__call__'): - res.sort(sort) - else: - res.sort() - - def samefile(self, other): - """ return True if other refers to the same stat object as self. 
""" - return self.strpath == str(other) - -class Visitor: - def __init__(self, fil, rec, ignore, bf, sort): - if isinstance(fil, str): - fil = FNMatcher(fil) - if isinstance(rec, str): - self.rec = FNMatcher(rec) - elif not hasattr(rec, '__call__') and rec: - self.rec = lambda path: True - else: - self.rec = rec - self.fil = fil - self.ignore = ignore - self.breadthfirst = bf - self.optsort = sort and sorted or (lambda x: x) - - def gen(self, path): - try: - entries = path.listdir() - except self.ignore: - return - rec = self.rec - dirs = self.optsort([p for p in entries - if p.check(dir=1) and (rec is None or rec(p))]) - if not self.breadthfirst: - for subdir in dirs: - for p in self.gen(subdir): - yield p - for p in self.optsort(entries): - if self.fil is None or self.fil(p): - yield p - if self.breadthfirst: - for subdir in dirs: - for p in self.gen(subdir): - yield p - -class FNMatcher: - def __init__(self, pattern): - self.pattern = pattern - - def __call__(self, path): - pattern = self.pattern - - if (pattern.find(path.sep) == -1 and - iswin32 and - pattern.find(posixpath.sep) != -1): - # Running on Windows, the pattern has no Windows path separators, - # and the pattern has one or more Posix path separators. Replace - # the Posix path separators with the Windows path separator. - pattern = pattern.replace(posixpath.sep, path.sep) - - if pattern.find(path.sep) == -1: - name = path.basename - else: - name = str(path) # path.strpath # XXX svn? - if not os.path.isabs(pattern): - pattern = '*' + path.sep + pattern - return py.std.fnmatch.fnmatch(name, pattern) - +""" +""" +import os, sys, posixpath +import fnmatch +import py + +# Moved from local.py. +iswin32 = sys.platform == "win32" or (getattr(os, '_name', False) == 'nt') + +try: + from os import fspath +except ImportError: + def fspath(path): + """ + Return the string representation of the path. + If str or bytes is passed in, it is returned unchanged. 
+ This code comes from PEP 519, modified to support earlier versions of + python. + + This is required for python < 3.6. + """ + if isinstance(path, (py.builtin.text, py.builtin.bytes)): + return path + + # Work from the object's type to match method resolution of other magic + # methods. + path_type = type(path) + try: + return path_type.__fspath__(path) + except AttributeError: + if hasattr(path_type, '__fspath__'): + raise + try: + import pathlib + except ImportError: + pass + else: + if isinstance(path, pathlib.PurePath): + return py.builtin.text(path) + + raise TypeError("expected str, bytes or os.PathLike object, not " + + path_type.__name__) + +class Checkers: + _depend_on_existence = 'exists', 'link', 'dir', 'file' + + def __init__(self, path): + self.path = path + + def dir(self): + raise NotImplementedError + + def file(self): + raise NotImplementedError + + def dotfile(self): + return self.path.basename.startswith('.') + + def ext(self, arg): + if not arg.startswith('.'): + arg = '.' 
+ arg + return self.path.ext == arg + + def exists(self): + raise NotImplementedError + + def basename(self, arg): + return self.path.basename == arg + + def basestarts(self, arg): + return self.path.basename.startswith(arg) + + def relto(self, arg): + return self.path.relto(arg) + + def fnmatch(self, arg): + return self.path.fnmatch(arg) + + def endswith(self, arg): + return str(self.path).endswith(arg) + + def _evaluate(self, kw): + for name, value in kw.items(): + invert = False + meth = None + try: + meth = getattr(self, name) + except AttributeError: + if name[:3] == 'not': + invert = True + try: + meth = getattr(self, name[3:]) + except AttributeError: + pass + if meth is None: + raise TypeError( + "no %r checker available for %r" % (name, self.path)) + try: + if py.code.getrawcode(meth).co_argcount > 1: + if (not meth(value)) ^ invert: + return False + else: + if bool(value) ^ bool(meth()) ^ invert: + return False + except (py.error.ENOENT, py.error.ENOTDIR, py.error.EBUSY): + # EBUSY feels not entirely correct, + # but its kind of necessary since ENOMEDIUM + # is not accessible in python + for name in self._depend_on_existence: + if name in kw: + if kw.get(name): + return False + name = 'not' + name + if name in kw: + if not kw.get(name): + return False + return True + +class NeverRaised(Exception): + pass + +class PathBase(object): + """ shared implementation for filesystem path objects.""" + Checkers = Checkers + + def __div__(self, other): + return self.join(fspath(other)) + __truediv__ = __div__ # py3k + + def basename(self): + """ basename part of path. """ + return self._getbyspec('basename')[0] + basename = property(basename, None, None, basename.__doc__) + + def dirname(self): + """ dirname part of path. 
""" + return self._getbyspec('dirname')[0] + dirname = property(dirname, None, None, dirname.__doc__) + + def purebasename(self): + """ pure base name of the path.""" + return self._getbyspec('purebasename')[0] + purebasename = property(purebasename, None, None, purebasename.__doc__) + + def ext(self): + """ extension of the path (including the '.').""" + return self._getbyspec('ext')[0] + ext = property(ext, None, None, ext.__doc__) + + def dirpath(self, *args, **kwargs): + """ return the directory path joined with any given path arguments. """ + return self.new(basename='').join(*args, **kwargs) + + def read_binary(self): + """ read and return a bytestring from reading the path. """ + with self.open('rb') as f: + return f.read() + + def read_text(self, encoding): + """ read and return a Unicode string from reading the path. """ + with self.open("r", encoding=encoding) as f: + return f.read() + + + def read(self, mode='r'): + """ read and return a bytestring from reading the path. """ + with self.open(mode) as f: + return f.read() + + def readlines(self, cr=1): + """ read and return a list of lines from the path. if cr is False, the +newline will be removed from the end of each line. """ + if sys.version_info < (3, ): + mode = 'rU' + else: # python 3 deprecates mode "U" in favor of "newline" option + mode = 'r' + + if not cr: + content = self.read(mode) + return content.split('\n') + else: + f = self.open(mode) + try: + return f.readlines() + finally: + f.close() + + def load(self): + """ (deprecated) return object unpickled from self.read() """ + f = self.open('rb') + try: + return py.error.checked_call(py.std.pickle.load, f) + finally: + f.close() + + def move(self, target): + """ move this path to target. 
""" + if target.relto(self): + raise py.error.EINVAL(target, + "cannot move path into a subdirectory of itself") + try: + self.rename(target) + except py.error.EXDEV: # invalid cross-device link + self.copy(target) + self.remove() + + def __repr__(self): + """ return a string representation of this path. """ + return repr(str(self)) + + def check(self, **kw): + """ check a path for existence and properties. + + Without arguments, return True if the path exists, otherwise False. + + valid checkers:: + + file=1 # is a file + file=0 # is not a file (may not even exist) + dir=1 # is a dir + link=1 # is a link + exists=1 # exists + + You can specify multiple checker definitions, for example:: + + path.check(file=1, link=1) # a link pointing to a file + """ + if not kw: + kw = {'exists' : 1} + return self.Checkers(self)._evaluate(kw) + + def fnmatch(self, pattern): + """return true if the basename/fullname matches the glob-'pattern'. + + valid pattern characters:: + + * matches everything + ? matches any single character + [seq] matches any character in seq + [!seq] matches any char not in seq + + If the pattern contains a path-separator then the full path + is used for pattern matching and a '*' is prepended to the + pattern. + + if the pattern doesn't contain a path-separator the pattern + is only matched against the basename. + """ + return FNMatcher(pattern)(self) + + def relto(self, relpath): + """ return a string which is the relative part of the path + to the given 'relpath'. 
+ """ + if not isinstance(relpath, (str, PathBase)): + raise TypeError("%r: not a string or path object" %(relpath,)) + strrelpath = str(relpath) + if strrelpath and strrelpath[-1] != self.sep: + strrelpath += self.sep + #assert strrelpath[-1] == self.sep + #assert strrelpath[-2] != self.sep + strself = self.strpath + if sys.platform == "win32" or getattr(os, '_name', None) == 'nt': + if os.path.normcase(strself).startswith( + os.path.normcase(strrelpath)): + return strself[len(strrelpath):] + elif strself.startswith(strrelpath): + return strself[len(strrelpath):] + return "" + + def ensure_dir(self, *args): + """ ensure the path joined with args is a directory. """ + return self.ensure(*args, **{"dir": True}) + + def bestrelpath(self, dest): + """ return a string which is a relative path from self + (assumed to be a directory) to dest such that + self.join(bestrelpath) == dest and if not such + path can be determined return dest. + """ + try: + if self == dest: + return os.curdir + base = self.common(dest) + if not base: # can be the case on windows + return str(dest) + self2base = self.relto(base) + reldest = dest.relto(base) + if self2base: + n = self2base.count(self.sep) + 1 + else: + n = 0 + l = [os.pardir] * n + if reldest: + l.append(reldest) + target = dest.sep.join(l) + return target + except AttributeError: + return str(dest) + + def exists(self): + return self.check() + + def isdir(self): + return self.check(dir=1) + + def isfile(self): + return self.check(file=1) + + def parts(self, reverse=False): + """ return a root-first list of all ancestor directories + plus the path itself. + """ + current = self + l = [self] + while 1: + last = current + current = current.dirpath() + if last == current: + break + l.append(current) + if not reverse: + l.reverse() + return l + + def common(self, other): + """ return the common part shared with the other path + or None if there is no common part. 
+ """ + last = None + for x, y in zip(self.parts(), other.parts()): + if x != y: + return last + last = x + return last + + def __add__(self, other): + """ return new path object with 'other' added to the basename""" + return self.new(basename=self.basename+str(other)) + + def __cmp__(self, other): + """ return sort value (-1, 0, +1). """ + try: + return cmp(self.strpath, other.strpath) + except AttributeError: + return cmp(str(self), str(other)) # self.path, other.path) + + def __lt__(self, other): + try: + return self.strpath < other.strpath + except AttributeError: + return str(self) < str(other) + + def visit(self, fil=None, rec=None, ignore=NeverRaised, bf=False, sort=False): + """ yields all paths below the current one + + fil is a filter (glob pattern or callable), if not matching the + path will not be yielded, defaulting to None (everything is + returned) + + rec is a filter (glob pattern or callable) that controls whether + a node is descended, defaulting to None + + ignore is an Exception class that is ignoredwhen calling dirlist() + on any of the paths (by default, all exceptions are reported) + + bf if True will cause a breadthfirst search instead of the + default depthfirst. Default: False + + sort if True will sort entries within each directory level. + """ + for x in Visitor(fil, rec, ignore, bf, sort).gen(self): + yield x + + def _sortlist(self, res, sort): + if sort: + if hasattr(sort, '__call__'): + res.sort(sort) + else: + res.sort() + + def samefile(self, other): + """ return True if other refers to the same stat object as self. 
""" + return self.strpath == str(other) + + def __fspath__(self): + return self.strpath + +class Visitor: + def __init__(self, fil, rec, ignore, bf, sort): + if isinstance(fil, py.builtin._basestring): + fil = FNMatcher(fil) + if isinstance(rec, py.builtin._basestring): + self.rec = FNMatcher(rec) + elif not hasattr(rec, '__call__') and rec: + self.rec = lambda path: True + else: + self.rec = rec + self.fil = fil + self.ignore = ignore + self.breadthfirst = bf + self.optsort = sort and sorted or (lambda x: x) + + def gen(self, path): + try: + entries = path.listdir() + except self.ignore: + return + rec = self.rec + dirs = self.optsort([p for p in entries + if p.check(dir=1) and (rec is None or rec(p))]) + if not self.breadthfirst: + for subdir in dirs: + for p in self.gen(subdir): + yield p + for p in self.optsort(entries): + if self.fil is None or self.fil(p): + yield p + if self.breadthfirst: + for subdir in dirs: + for p in self.gen(subdir): + yield p + +class FNMatcher: + def __init__(self, pattern): + self.pattern = pattern + + def __call__(self, path): + pattern = self.pattern + + if (pattern.find(path.sep) == -1 and + iswin32 and + pattern.find(posixpath.sep) != -1): + # Running on Windows, the pattern has no Windows path separators, + # and the pattern has one or more Posix path separators. Replace + # the Posix path separators with the Windows path separator. + pattern = pattern.replace(posixpath.sep, path.sep) + + if pattern.find(path.sep) == -1: + name = path.basename + else: + name = str(path) # path.strpath # XXX svn? + if not os.path.isabs(pattern): + pattern = '*' + path.sep + pattern + return fnmatch.fnmatch(name, pattern) diff --git a/third_party/python/py/py/_path/local.py b/third_party/python/py/py/_path/local.py index d569404ec21e0..264f1156118d1 100644 --- a/third_party/python/py/py/_path/local.py +++ b/third_party/python/py/py/_path/local.py @@ -1,911 +1,930 @@ -""" -local path implementation. 
-""" -from __future__ import with_statement - -from contextlib import contextmanager -import sys, os, re, atexit, io -import py -from py._path import common -from py._path.common import iswin32 -from stat import S_ISLNK, S_ISDIR, S_ISREG - -from os.path import abspath, normpath, isabs, exists, isdir, isfile, islink, dirname - -if sys.version_info > (3,0): - def map_as_list(func, iter): - return list(map(func, iter)) -else: - map_as_list = map - -class Stat(object): - def __getattr__(self, name): - return getattr(self._osstatresult, "st_" + name) - - def __init__(self, path, osstatresult): - self.path = path - self._osstatresult = osstatresult - - @property - def owner(self): - if iswin32: - raise NotImplementedError("XXX win32") - import pwd - entry = py.error.checked_call(pwd.getpwuid, self.uid) - return entry[0] - - @property - def group(self): - """ return group name of file. """ - if iswin32: - raise NotImplementedError("XXX win32") - import grp - entry = py.error.checked_call(grp.getgrgid, self.gid) - return entry[0] - - def isdir(self): - return S_ISDIR(self._osstatresult.st_mode) - - def isfile(self): - return S_ISREG(self._osstatresult.st_mode) - - def islink(self): - st = self.path.lstat() - return S_ISLNK(self._osstatresult.st_mode) - -class PosixPath(common.PathBase): - def chown(self, user, group, rec=0): - """ change ownership to the given user and group. - user and group may be specified by a number or - by a name. if rec is True change ownership - recursively. - """ - uid = getuserid(user) - gid = getgroupid(group) - if rec: - for x in self.visit(rec=lambda x: x.check(link=0)): - if x.check(link=0): - py.error.checked_call(os.chown, str(x), uid, gid) - py.error.checked_call(os.chown, str(self), uid, gid) - - def readlink(self): - """ return value of a symbolic link. """ - return py.error.checked_call(os.readlink, self.strpath) - - def mklinkto(self, oldname): - """ posix style hard link to another name. 
""" - py.error.checked_call(os.link, str(oldname), str(self)) - - def mksymlinkto(self, value, absolute=1): - """ create a symbolic link with the given value (pointing to another name). """ - if absolute: - py.error.checked_call(os.symlink, str(value), self.strpath) - else: - base = self.common(value) - # with posix local paths '/' is always a common base - relsource = self.__class__(value).relto(base) - reldest = self.relto(base) - n = reldest.count(self.sep) - target = self.sep.join(('..', )*n + (relsource, )) - py.error.checked_call(os.symlink, target, self.strpath) - -def getuserid(user): - import pwd - if not isinstance(user, int): - user = pwd.getpwnam(user)[2] - return user - -def getgroupid(group): - import grp - if not isinstance(group, int): - group = grp.getgrnam(group)[2] - return group - -FSBase = not iswin32 and PosixPath or common.PathBase - -class LocalPath(FSBase): - """ object oriented interface to os.path and other local filesystem - related information. - """ - class ImportMismatchError(ImportError): - """ raised on pyimport() if there is a mismatch of __file__'s""" - - sep = os.sep - class Checkers(common.Checkers): - def _stat(self): - try: - return self._statcache - except AttributeError: - try: - self._statcache = self.path.stat() - except py.error.ELOOP: - self._statcache = self.path.lstat() - return self._statcache - - def dir(self): - return S_ISDIR(self._stat().mode) - - def file(self): - return S_ISREG(self._stat().mode) - - def exists(self): - return self._stat() - - def link(self): - st = self.path.lstat() - return S_ISLNK(st.mode) - - def __init__(self, path=None, expanduser=False): - """ Initialize and return a local Path instance. - - Path can be relative to the current directory. - If path is None it defaults to the current working directory. - If expanduser is True, tilde-expansion is performed. - Note that Path instances always carry an absolute path. 
- Note also that passing in a local path object will simply return - the exact same path object. Use new() to get a new copy. - """ - if path is None: - self.strpath = py.error.checked_call(os.getcwd) - elif isinstance(path, common.PathBase): - self.strpath = path.strpath - elif isinstance(path, py.builtin._basestring): - if expanduser: - path = os.path.expanduser(path) - self.strpath = abspath(path) - else: - raise ValueError("can only pass None, Path instances " - "or non-empty strings to LocalPath") - - def __hash__(self): - return hash(self.strpath) - - def __eq__(self, other): - s1 = self.strpath - s2 = getattr(other, "strpath", other) - if iswin32: - s1 = s1.lower() - try: - s2 = s2.lower() - except AttributeError: - return False - return s1 == s2 - - def __ne__(self, other): - return not (self == other) - - def __lt__(self, other): - return self.strpath < getattr(other, "strpath", other) - - def __gt__(self, other): - return self.strpath > getattr(other, "strpath", other) - - def samefile(self, other): - """ return True if 'other' references the same file as 'self'. - """ - other = getattr(other, "strpath", other) - if not isabs(other): - other = abspath(other) - if self == other: - return True - if iswin32: - return False # there is no samefile - return py.error.checked_call( - os.path.samefile, self.strpath, other) - - def remove(self, rec=1, ignore_errors=False): - """ remove a file or directory (or a directory tree if rec=1). - if ignore_errors is True, errors while removing directories will - be ignored. 
- """ - if self.check(dir=1, link=0): - if rec: - # force remove of readonly files on windows - if iswin32: - self.chmod(448, rec=1) # octcal 0700 - py.error.checked_call(py.std.shutil.rmtree, self.strpath, - ignore_errors=ignore_errors) - else: - py.error.checked_call(os.rmdir, self.strpath) - else: - if iswin32: - self.chmod(448) # octcal 0700 - py.error.checked_call(os.remove, self.strpath) - - def computehash(self, hashtype="md5", chunksize=524288): - """ return hexdigest of hashvalue for this file. """ - try: - try: - import hashlib as mod - except ImportError: - if hashtype == "sha1": - hashtype = "sha" - mod = __import__(hashtype) - hash = getattr(mod, hashtype)() - except (AttributeError, ImportError): - raise ValueError("Don't know how to compute %r hash" %(hashtype,)) - f = self.open('rb') - try: - while 1: - buf = f.read(chunksize) - if not buf: - return hash.hexdigest() - hash.update(buf) - finally: - f.close() - - def new(self, **kw): - """ create a modified version of this path. - the following keyword arguments modify various path parts:: - - a:/some/path/to/a/file.ext - xx drive - xxxxxxxxxxxxxxxxx dirname - xxxxxxxx basename - xxxx purebasename - xxx ext - """ - obj = object.__new__(self.__class__) - if not kw: - obj.strpath = self.strpath - return obj - drive, dirname, basename, purebasename,ext = self._getbyspec( - "drive,dirname,basename,purebasename,ext") - if 'basename' in kw: - if 'purebasename' in kw or 'ext' in kw: - raise ValueError("invalid specification %r" % kw) - else: - pb = kw.setdefault('purebasename', purebasename) - try: - ext = kw['ext'] - except KeyError: - pass - else: - if ext and not ext.startswith('.'): - ext = '.' 
+ ext - kw['basename'] = pb + ext - - if ('dirname' in kw and not kw['dirname']): - kw['dirname'] = drive - else: - kw.setdefault('dirname', dirname) - kw.setdefault('sep', self.sep) - obj.strpath = normpath( - "%(dirname)s%(sep)s%(basename)s" % kw) - return obj - - def _getbyspec(self, spec): - """ see new for what 'spec' can be. """ - res = [] - parts = self.strpath.split(self.sep) - - args = filter(None, spec.split(',') ) - append = res.append - for name in args: - if name == 'drive': - append(parts[0]) - elif name == 'dirname': - append(self.sep.join(parts[:-1])) - else: - basename = parts[-1] - if name == 'basename': - append(basename) - else: - i = basename.rfind('.') - if i == -1: - purebasename, ext = basename, '' - else: - purebasename, ext = basename[:i], basename[i:] - if name == 'purebasename': - append(purebasename) - elif name == 'ext': - append(ext) - else: - raise ValueError("invalid part specification %r" % name) - return res - - def dirpath(self, *args, **kwargs): - """ return the directory path joined with any given path arguments. """ - if not kwargs: - path = object.__new__(self.__class__) - path.strpath = dirname(self.strpath) - if args: - path = path.join(*args) - return path - return super(LocalPath, self).dirpath(*args, **kwargs) - - def join(self, *args, **kwargs): - """ return a new path by appending all 'args' as path - components. if abs=1 is used restart from root if any - of the args is an absolute path. - """ - sep = self.sep - strargs = [getattr(arg, "strpath", arg) for arg in args] - strpath = self.strpath - if kwargs.get('abs'): - newargs = [] - for arg in reversed(strargs): - if isabs(arg): - strpath = arg - strargs = newargs - break - newargs.insert(0, arg) - for arg in strargs: - arg = arg.strip(sep) - if iswin32: - # allow unix style paths even on windows. 
- arg = arg.strip('/') - arg = arg.replace('/', sep) - strpath = strpath + sep + arg - obj = object.__new__(self.__class__) - obj.strpath = normpath(strpath) - return obj - - def open(self, mode='r', ensure=False, encoding=None): - """ return an opened file with the given mode. - - If ensure is True, create parent directories if needed. - """ - if ensure: - self.dirpath().ensure(dir=1) - if encoding: - return py.error.checked_call(io.open, self.strpath, mode, encoding=encoding) - return py.error.checked_call(open, self.strpath, mode) - - def _fastjoin(self, name): - child = object.__new__(self.__class__) - child.strpath = self.strpath + self.sep + name - return child - - def islink(self): - return islink(self.strpath) - - def check(self, **kw): - if not kw: - return exists(self.strpath) - if len(kw) == 1: - if "dir" in kw: - return not kw["dir"] ^ isdir(self.strpath) - if "file" in kw: - return not kw["file"] ^ isfile(self.strpath) - return super(LocalPath, self).check(**kw) - - _patternchars = set("*?[" + os.path.sep) - def listdir(self, fil=None, sort=None): - """ list directory contents, possibly filter by the given fil func - and possibly sorted. - """ - if fil is None and sort is None: - names = py.error.checked_call(os.listdir, self.strpath) - return map_as_list(self._fastjoin, names) - if isinstance(fil, py.builtin._basestring): - if not self._patternchars.intersection(fil): - child = self._fastjoin(fil) - if exists(child.strpath): - return [child] - return [] - fil = common.FNMatcher(fil) - names = py.error.checked_call(os.listdir, self.strpath) - res = [] - for name in names: - child = self._fastjoin(name) - if fil is None or fil(child): - res.append(child) - self._sortlist(res, sort) - return res - - def size(self): - """ return size of the underlying file object """ - return self.stat().size - - def mtime(self): - """ return last modification time of the path. 
""" - return self.stat().mtime - - def copy(self, target, mode=False): - """ copy path to target.""" - if self.check(file=1): - if target.check(dir=1): - target = target.join(self.basename) - assert self!=target - copychunked(self, target) - if mode: - copymode(self.strpath, target.strpath) - else: - def rec(p): - return p.check(link=0) - for x in self.visit(rec=rec): - relpath = x.relto(self) - newx = target.join(relpath) - newx.dirpath().ensure(dir=1) - if x.check(link=1): - newx.mksymlinkto(x.readlink()) - continue - elif x.check(file=1): - copychunked(x, newx) - elif x.check(dir=1): - newx.ensure(dir=1) - if mode: - copymode(x.strpath, newx.strpath) - - def rename(self, target): - """ rename this path to target. """ - target = getattr(target, "strpath", target) - return py.error.checked_call(os.rename, self.strpath, target) - - def dump(self, obj, bin=1): - """ pickle object into path location""" - f = self.open('wb') - try: - py.error.checked_call(py.std.pickle.dump, obj, f, bin) - finally: - f.close() - - def mkdir(self, *args): - """ create & return the directory joined with args. """ - p = self.join(*args) - py.error.checked_call(os.mkdir, getattr(p, "strpath", p)) - return p - - def write_binary(self, data, ensure=False): - """ write binary data into path. If ensure is True create - missing parent directories. - """ - if ensure: - self.dirpath().ensure(dir=1) - with self.open('wb') as f: - f.write(data) - - def write_text(self, data, encoding, ensure=False): - """ write text data into path using the specified encoding. - If ensure is True create missing parent directories. - """ - if ensure: - self.dirpath().ensure(dir=1) - with self.open('w', encoding=encoding) as f: - f.write(data) - - def write(self, data, mode='w', ensure=False): - """ write data into path. If ensure is True create - missing parent directories. 
- """ - if ensure: - self.dirpath().ensure(dir=1) - if 'b' in mode: - if not py.builtin._isbytes(data): - raise ValueError("can only process bytes") - else: - if not py.builtin._istext(data): - if not py.builtin._isbytes(data): - data = str(data) - else: - data = py.builtin._totext(data, sys.getdefaultencoding()) - f = self.open(mode) - try: - f.write(data) - finally: - f.close() - - def _ensuredirs(self): - parent = self.dirpath() - if parent == self: - return self - if parent.check(dir=0): - parent._ensuredirs() - if self.check(dir=0): - try: - self.mkdir() - except py.error.EEXIST: - # race condition: file/dir created by another thread/process. - # complain if it is not a dir - if self.check(dir=0): - raise - return self - - def ensure(self, *args, **kwargs): - """ ensure that an args-joined path exists (by default as - a file). if you specify a keyword argument 'dir=True' - then the path is forced to be a directory path. - """ - p = self.join(*args) - if kwargs.get('dir', 0): - return p._ensuredirs() - else: - p.dirpath()._ensuredirs() - if not p.check(file=1): - p.open('w').close() - return p - - def stat(self, raising=True): - """ Return an os.stat() tuple. """ - if raising == True: - return Stat(self, py.error.checked_call(os.stat, self.strpath)) - try: - return Stat(self, os.stat(self.strpath)) - except KeyboardInterrupt: - raise - except Exception: - return None - - def lstat(self): - """ Return an os.lstat() tuple. """ - return Stat(self, py.error.checked_call(os.lstat, self.strpath)) - - def setmtime(self, mtime=None): - """ set modification time for the given path. if 'mtime' is None - (the default) then the file's mtime is set to current time. - - Note that the resolution for 'mtime' is platform dependent. 
- """ - if mtime is None: - return py.error.checked_call(os.utime, self.strpath, mtime) - try: - return py.error.checked_call(os.utime, self.strpath, (-1, mtime)) - except py.error.EINVAL: - return py.error.checked_call(os.utime, self.strpath, (self.atime(), mtime)) - - def chdir(self): - """ change directory to self and return old current directory """ - try: - old = self.__class__() - except py.error.ENOENT: - old = None - py.error.checked_call(os.chdir, self.strpath) - return old - - - @contextmanager - def as_cwd(self): - """ return context manager which changes to current dir during the - managed "with" context. On __enter__ it returns the old dir. - """ - old = self.chdir() - try: - yield old - finally: - old.chdir() - - def realpath(self): - """ return a new path which contains no symbolic links.""" - return self.__class__(os.path.realpath(self.strpath)) - - def atime(self): - """ return last access time of the path. """ - return self.stat().atime - - def __repr__(self): - return 'local(%r)' % self.strpath - - def __str__(self): - """ return string representation of the Path. """ - return self.strpath - - def chmod(self, mode, rec=0): - """ change permissions to the given mode. If mode is an - integer it directly encodes the os-specific modes. - if rec is True perform recursively. - """ - if not isinstance(mode, int): - raise TypeError("mode %r must be an integer" % (mode,)) - if rec: - for x in self.visit(rec=rec): - py.error.checked_call(os.chmod, str(x), mode) - py.error.checked_call(os.chmod, self.strpath, mode) - - def pypkgpath(self): - """ return the Python package path by looking for the last - directory upwards which still contains an __init__.py. - Return None if a pkgpath can not be determined. 
- """ - pkgpath = None - for parent in self.parts(reverse=True): - if parent.isdir(): - if not parent.join('__init__.py').exists(): - break - if not isimportable(parent.basename): - break - pkgpath = parent - return pkgpath - - def _ensuresyspath(self, ensuremode, path): - if ensuremode: - s = str(path) - if ensuremode == "append": - if s not in sys.path: - sys.path.append(s) - else: - if s != sys.path[0]: - sys.path.insert(0, s) - - def pyimport(self, modname=None, ensuresyspath=True): - """ return path as an imported python module. - - If modname is None, look for the containing package - and construct an according module name. - The module will be put/looked up in sys.modules. - if ensuresyspath is True then the root dir for importing - the file (taking __init__.py files into account) will - be prepended to sys.path if it isn't there already. - If ensuresyspath=="append" the root dir will be appended - if it isn't already contained in sys.path. - if ensuresyspath is False no modification of syspath happens. - """ - if not self.check(): - raise py.error.ENOENT(self) - - pkgpath = None - if modname is None: - pkgpath = self.pypkgpath() - if pkgpath is not None: - pkgroot = pkgpath.dirpath() - names = self.new(ext="").relto(pkgroot).split(self.sep) - if names[-1] == "__init__": - names.pop() - modname = ".".join(names) - else: - pkgroot = self.dirpath() - modname = self.purebasename - - self._ensuresyspath(ensuresyspath, pkgroot) - __import__(modname) - mod = sys.modules[modname] - if self.basename == "__init__.py": - return mod # we don't check anything as we might - # we in a namespace package ... 
too icky to check - modfile = mod.__file__ - if modfile[-4:] in ('.pyc', '.pyo'): - modfile = modfile[:-1] - elif modfile.endswith('$py.class'): - modfile = modfile[:-9] + '.py' - if modfile.endswith(os.path.sep + "__init__.py"): - if self.basename != "__init__.py": - modfile = modfile[:-12] - try: - issame = self.samefile(modfile) - except py.error.ENOENT: - issame = False - if not issame: - raise self.ImportMismatchError(modname, modfile, self) - return mod - else: - try: - return sys.modules[modname] - except KeyError: - # we have a custom modname, do a pseudo-import - mod = py.std.types.ModuleType(modname) - mod.__file__ = str(self) - sys.modules[modname] = mod - try: - py.builtin.execfile(str(self), mod.__dict__) - except: - del sys.modules[modname] - raise - return mod - - def sysexec(self, *argv, **popen_opts): - """ return stdout text from executing a system child process, - where the 'self' path points to executable. - The process is directly invoked and not through a system shell. - """ - from subprocess import Popen, PIPE - argv = map_as_list(str, argv) - popen_opts['stdout'] = popen_opts['stderr'] = PIPE - proc = Popen([str(self)] + argv, **popen_opts) - stdout, stderr = proc.communicate() - ret = proc.wait() - if py.builtin._isbytes(stdout): - stdout = py.builtin._totext(stdout, sys.getdefaultencoding()) - if ret != 0: - if py.builtin._isbytes(stderr): - stderr = py.builtin._totext(stderr, sys.getdefaultencoding()) - raise py.process.cmdexec.Error(ret, ret, str(self), - stdout, stderr,) - return stdout - - def sysfind(cls, name, checker=None, paths=None): - """ return a path object found by looking at the systems - underlying PATH specification. If the checker is not None - it will be invoked to filter matching paths. If a binary - cannot be found, None is returned - Note: This is probably not working on plain win32 systems - but may work on cygwin. 
- """ - if isabs(name): - p = py.path.local(name) - if p.check(file=1): - return p - else: - if paths is None: - if iswin32: - paths = py.std.os.environ['Path'].split(';') - if '' not in paths and '.' not in paths: - paths.append('.') - try: - systemroot = os.environ['SYSTEMROOT'] - except KeyError: - pass - else: - paths = [re.sub('%SystemRoot%', systemroot, path) - for path in paths] - else: - paths = py.std.os.environ['PATH'].split(':') - tryadd = [] - if iswin32: - tryadd += os.environ['PATHEXT'].split(os.pathsep) - tryadd.append("") - - for x in paths: - for addext in tryadd: - p = py.path.local(x).join(name, abs=True) + addext - try: - if p.check(file=1): - if checker: - if not checker(p): - continue - return p - except py.error.EACCES: - pass - return None - sysfind = classmethod(sysfind) - - def _gethomedir(cls): - try: - x = os.environ['HOME'] - except KeyError: - try: - x = os.environ["HOMEDRIVE"] + os.environ['HOMEPATH'] - except KeyError: - return None - return cls(x) - _gethomedir = classmethod(_gethomedir) - - #""" - #special class constructors for local filesystem paths - #""" - def get_temproot(cls): - """ return the system's temporary directory - (where tempfiles are usually created in) - """ - return py.path.local(py.std.tempfile.gettempdir()) - get_temproot = classmethod(get_temproot) - - def mkdtemp(cls, rootdir=None): - """ return a Path object pointing to a fresh new temporary directory - (which we created ourself). - """ - import tempfile - if rootdir is None: - rootdir = cls.get_temproot() - return cls(py.error.checked_call(tempfile.mkdtemp, dir=str(rootdir))) - mkdtemp = classmethod(mkdtemp) - - def make_numbered_dir(cls, prefix='session-', rootdir=None, keep=3, - lock_timeout = 172800): # two days - """ return unique directory with a number greater than the current - maximum one. The number is assumed to start directly after prefix. - if keep is true directories with a number less than (maxnum-keep) - will be removed. 
- """ - if rootdir is None: - rootdir = cls.get_temproot() - - def parse_num(path): - """ parse the number out of a path (if it matches the prefix) """ - bn = path.basename - if bn.startswith(prefix): - try: - return int(bn[len(prefix):]) - except ValueError: - pass - - # compute the maximum number currently in use with the - # prefix - lastmax = None - while True: - maxnum = -1 - for path in rootdir.listdir(): - num = parse_num(path) - if num is not None: - maxnum = max(maxnum, num) - - # make the new directory - try: - udir = rootdir.mkdir(prefix + str(maxnum+1)) - except py.error.EEXIST: - # race condition: another thread/process created the dir - # in the meantime. Try counting again - if lastmax == maxnum: - raise - lastmax = maxnum - continue - break - - # put a .lock file in the new directory that will be removed at - # process exit - if lock_timeout: - lockfile = udir.join('.lock') - mypid = os.getpid() - if hasattr(lockfile, 'mksymlinkto'): - lockfile.mksymlinkto(str(mypid)) - else: - lockfile.write(str(mypid)) - def try_remove_lockfile(): - # in a fork() situation, only the last process should - # remove the .lock, otherwise the other processes run the - # risk of seeing their temporary dir disappear. For now - # we remove the .lock in the parent only (i.e. we assume - # that the children finish before the parent). 
- if os.getpid() != mypid: - return - try: - lockfile.remove() - except py.error.Error: - pass - atexit.register(try_remove_lockfile) - - # prune old directories - if keep: - for path in rootdir.listdir(): - num = parse_num(path) - if num is not None and num <= (maxnum - keep): - lf = path.join('.lock') - try: - t1 = lf.lstat().mtime - t2 = lockfile.lstat().mtime - if not lock_timeout or abs(t2-t1) < lock_timeout: - continue # skip directories still locked - except py.error.Error: - pass # assume that it means that there is no 'lf' - try: - path.remove(rec=1) - except KeyboardInterrupt: - raise - except: # this might be py.error.Error, WindowsError ... - pass - - # make link... - try: - username = os.environ['USER'] #linux, et al - except KeyError: - try: - username = os.environ['USERNAME'] #windows - except KeyError: - username = 'current' - - src = str(udir) - dest = src[:src.rfind('-')] + '-' + username - try: - os.unlink(dest) - except OSError: - pass - try: - os.symlink(src, dest) - except (OSError, AttributeError, NotImplementedError): - pass - - return udir - make_numbered_dir = classmethod(make_numbered_dir) - -def copymode(src, dest): - py.std.shutil.copymode(src, dest) - -def copychunked(src, dest): - chunksize = 524288 # half a meg of bytes - fsrc = src.open('rb') - try: - fdest = dest.open('wb') - try: - while 1: - buf = fsrc.read(chunksize) - if not buf: - break - fdest.write(buf) - finally: - fdest.close() - finally: - fsrc.close() - -def isimportable(name): - if name and (name[0].isalpha() or name[0] == '_'): - name = name.replace("_", '') - return not name or name.isalnum() +""" +local path implementation. 
+""" +from __future__ import with_statement + +from contextlib import contextmanager +import sys, os, re, atexit, io +import py +from py._path import common +from py._path.common import iswin32, fspath +from stat import S_ISLNK, S_ISDIR, S_ISREG + +from os.path import abspath, normcase, normpath, isabs, exists, isdir, isfile, islink, dirname + +if sys.version_info > (3,0): + def map_as_list(func, iter): + return list(map(func, iter)) +else: + map_as_list = map + +class Stat(object): + def __getattr__(self, name): + return getattr(self._osstatresult, "st_" + name) + + def __init__(self, path, osstatresult): + self.path = path + self._osstatresult = osstatresult + + @property + def owner(self): + if iswin32: + raise NotImplementedError("XXX win32") + import pwd + entry = py.error.checked_call(pwd.getpwuid, self.uid) + return entry[0] + + @property + def group(self): + """ return group name of file. """ + if iswin32: + raise NotImplementedError("XXX win32") + import grp + entry = py.error.checked_call(grp.getgrgid, self.gid) + return entry[0] + + def isdir(self): + return S_ISDIR(self._osstatresult.st_mode) + + def isfile(self): + return S_ISREG(self._osstatresult.st_mode) + + def islink(self): + st = self.path.lstat() + return S_ISLNK(self._osstatresult.st_mode) + +class PosixPath(common.PathBase): + def chown(self, user, group, rec=0): + """ change ownership to the given user and group. + user and group may be specified by a number or + by a name. if rec is True change ownership + recursively. + """ + uid = getuserid(user) + gid = getgroupid(group) + if rec: + for x in self.visit(rec=lambda x: x.check(link=0)): + if x.check(link=0): + py.error.checked_call(os.chown, str(x), uid, gid) + py.error.checked_call(os.chown, str(self), uid, gid) + + def readlink(self): + """ return value of a symbolic link. """ + return py.error.checked_call(os.readlink, self.strpath) + + def mklinkto(self, oldname): + """ posix style hard link to another name. 
""" + py.error.checked_call(os.link, str(oldname), str(self)) + + def mksymlinkto(self, value, absolute=1): + """ create a symbolic link with the given value (pointing to another name). """ + if absolute: + py.error.checked_call(os.symlink, str(value), self.strpath) + else: + base = self.common(value) + # with posix local paths '/' is always a common base + relsource = self.__class__(value).relto(base) + reldest = self.relto(base) + n = reldest.count(self.sep) + target = self.sep.join(('..', )*n + (relsource, )) + py.error.checked_call(os.symlink, target, self.strpath) + +def getuserid(user): + import pwd + if not isinstance(user, int): + user = pwd.getpwnam(user)[2] + return user + +def getgroupid(group): + import grp + if not isinstance(group, int): + group = grp.getgrnam(group)[2] + return group + +FSBase = not iswin32 and PosixPath or common.PathBase + +class LocalPath(FSBase): + """ object oriented interface to os.path and other local filesystem + related information. + """ + class ImportMismatchError(ImportError): + """ raised on pyimport() if there is a mismatch of __file__'s""" + + sep = os.sep + class Checkers(common.Checkers): + def _stat(self): + try: + return self._statcache + except AttributeError: + try: + self._statcache = self.path.stat() + except py.error.ELOOP: + self._statcache = self.path.lstat() + return self._statcache + + def dir(self): + return S_ISDIR(self._stat().mode) + + def file(self): + return S_ISREG(self._stat().mode) + + def exists(self): + return self._stat() + + def link(self): + st = self.path.lstat() + return S_ISLNK(st.mode) + + def __init__(self, path=None, expanduser=False): + """ Initialize and return a local Path instance. + + Path can be relative to the current directory. + If path is None it defaults to the current working directory. + If expanduser is True, tilde-expansion is performed. + Note that Path instances always carry an absolute path. 
+ Note also that passing in a local path object will simply return + the exact same path object. Use new() to get a new copy. + """ + if path is None: + self.strpath = py.error.checked_call(os.getcwd) + else: + try: + path = fspath(path) + except TypeError: + raise ValueError("can only pass None, Path instances " + "or non-empty strings to LocalPath") + if expanduser: + path = os.path.expanduser(path) + self.strpath = abspath(path) + + def __hash__(self): + return hash(self.strpath) + + def __eq__(self, other): + s1 = fspath(self) + try: + s2 = fspath(other) + except TypeError: + return False + if iswin32: + s1 = s1.lower() + try: + s2 = s2.lower() + except AttributeError: + return False + return s1 == s2 + + def __ne__(self, other): + return not (self == other) + + def __lt__(self, other): + return fspath(self) < fspath(other) + + def __gt__(self, other): + return fspath(self) > fspath(other) + + def samefile(self, other): + """ return True if 'other' references the same file as 'self'. + """ + other = fspath(other) + if not isabs(other): + other = abspath(other) + if self == other: + return True + if iswin32: + return False # there is no samefile + return py.error.checked_call( + os.path.samefile, self.strpath, other) + + def remove(self, rec=1, ignore_errors=False): + """ remove a file or directory (or a directory tree if rec=1). + if ignore_errors is True, errors while removing directories will + be ignored. + """ + if self.check(dir=1, link=0): + if rec: + # force remove of readonly files on windows + if iswin32: + self.chmod(0o700, rec=1) + py.error.checked_call(py.std.shutil.rmtree, self.strpath, + ignore_errors=ignore_errors) + else: + py.error.checked_call(os.rmdir, self.strpath) + else: + if iswin32: + self.chmod(0o700) + py.error.checked_call(os.remove, self.strpath) + + def computehash(self, hashtype="md5", chunksize=524288): + """ return hexdigest of hashvalue for this file. 
""" + try: + try: + import hashlib as mod + except ImportError: + if hashtype == "sha1": + hashtype = "sha" + mod = __import__(hashtype) + hash = getattr(mod, hashtype)() + except (AttributeError, ImportError): + raise ValueError("Don't know how to compute %r hash" %(hashtype,)) + f = self.open('rb') + try: + while 1: + buf = f.read(chunksize) + if not buf: + return hash.hexdigest() + hash.update(buf) + finally: + f.close() + + def new(self, **kw): + """ create a modified version of this path. + the following keyword arguments modify various path parts:: + + a:/some/path/to/a/file.ext + xx drive + xxxxxxxxxxxxxxxxx dirname + xxxxxxxx basename + xxxx purebasename + xxx ext + """ + obj = object.__new__(self.__class__) + if not kw: + obj.strpath = self.strpath + return obj + drive, dirname, basename, purebasename,ext = self._getbyspec( + "drive,dirname,basename,purebasename,ext") + if 'basename' in kw: + if 'purebasename' in kw or 'ext' in kw: + raise ValueError("invalid specification %r" % kw) + else: + pb = kw.setdefault('purebasename', purebasename) + try: + ext = kw['ext'] + except KeyError: + pass + else: + if ext and not ext.startswith('.'): + ext = '.' + ext + kw['basename'] = pb + ext + + if ('dirname' in kw and not kw['dirname']): + kw['dirname'] = drive + else: + kw.setdefault('dirname', dirname) + kw.setdefault('sep', self.sep) + obj.strpath = normpath( + "%(dirname)s%(sep)s%(basename)s" % kw) + return obj + + def _getbyspec(self, spec): + """ see new for what 'spec' can be. 
""" + res = [] + parts = self.strpath.split(self.sep) + + args = filter(None, spec.split(',') ) + append = res.append + for name in args: + if name == 'drive': + append(parts[0]) + elif name == 'dirname': + append(self.sep.join(parts[:-1])) + else: + basename = parts[-1] + if name == 'basename': + append(basename) + else: + i = basename.rfind('.') + if i == -1: + purebasename, ext = basename, '' + else: + purebasename, ext = basename[:i], basename[i:] + if name == 'purebasename': + append(purebasename) + elif name == 'ext': + append(ext) + else: + raise ValueError("invalid part specification %r" % name) + return res + + def dirpath(self, *args, **kwargs): + """ return the directory path joined with any given path arguments. """ + if not kwargs: + path = object.__new__(self.__class__) + path.strpath = dirname(self.strpath) + if args: + path = path.join(*args) + return path + return super(LocalPath, self).dirpath(*args, **kwargs) + + def join(self, *args, **kwargs): + """ return a new path by appending all 'args' as path + components. if abs=1 is used restart from root if any + of the args is an absolute path. + """ + sep = self.sep + strargs = [fspath(arg) for arg in args] + strpath = self.strpath + if kwargs.get('abs'): + newargs = [] + for arg in reversed(strargs): + if isabs(arg): + strpath = arg + strargs = newargs + break + newargs.insert(0, arg) + for arg in strargs: + arg = arg.strip(sep) + if iswin32: + # allow unix style paths even on windows. + arg = arg.strip('/') + arg = arg.replace('/', sep) + strpath = strpath + sep + arg + obj = object.__new__(self.__class__) + obj.strpath = normpath(strpath) + return obj + + def open(self, mode='r', ensure=False, encoding=None): + """ return an opened file with the given mode. + + If ensure is True, create parent directories if needed. 
+ """ + if ensure: + self.dirpath().ensure(dir=1) + if encoding: + return py.error.checked_call(io.open, self.strpath, mode, encoding=encoding) + return py.error.checked_call(open, self.strpath, mode) + + def _fastjoin(self, name): + child = object.__new__(self.__class__) + child.strpath = self.strpath + self.sep + name + return child + + def islink(self): + return islink(self.strpath) + + def check(self, **kw): + if not kw: + return exists(self.strpath) + if len(kw) == 1: + if "dir" in kw: + return not kw["dir"] ^ isdir(self.strpath) + if "file" in kw: + return not kw["file"] ^ isfile(self.strpath) + return super(LocalPath, self).check(**kw) + + _patternchars = set("*?[" + os.path.sep) + def listdir(self, fil=None, sort=None): + """ list directory contents, possibly filter by the given fil func + and possibly sorted. + """ + if fil is None and sort is None: + names = py.error.checked_call(os.listdir, self.strpath) + return map_as_list(self._fastjoin, names) + if isinstance(fil, py.builtin._basestring): + if not self._patternchars.intersection(fil): + child = self._fastjoin(fil) + if exists(child.strpath): + return [child] + return [] + fil = common.FNMatcher(fil) + names = py.error.checked_call(os.listdir, self.strpath) + res = [] + for name in names: + child = self._fastjoin(name) + if fil is None or fil(child): + res.append(child) + self._sortlist(res, sort) + return res + + def size(self): + """ return size of the underlying file object """ + return self.stat().size + + def mtime(self): + """ return last modification time of the path. """ + return self.stat().mtime + + def copy(self, target, mode=False, stat=False): + """ copy path to target. + + If mode is True, will copy copy permission from path to target. + If stat is True, copy permission, last modification + time, last access time, and flags from path to target. 
+ """ + if self.check(file=1): + if target.check(dir=1): + target = target.join(self.basename) + assert self!=target + copychunked(self, target) + if mode: + copymode(self.strpath, target.strpath) + if stat: + copystat(self, target) + else: + def rec(p): + return p.check(link=0) + for x in self.visit(rec=rec): + relpath = x.relto(self) + newx = target.join(relpath) + newx.dirpath().ensure(dir=1) + if x.check(link=1): + newx.mksymlinkto(x.readlink()) + continue + elif x.check(file=1): + copychunked(x, newx) + elif x.check(dir=1): + newx.ensure(dir=1) + if mode: + copymode(x.strpath, newx.strpath) + if stat: + copystat(x, newx) + + def rename(self, target): + """ rename this path to target. """ + target = fspath(target) + return py.error.checked_call(os.rename, self.strpath, target) + + def dump(self, obj, bin=1): + """ pickle object into path location""" + f = self.open('wb') + try: + py.error.checked_call(py.std.pickle.dump, obj, f, bin) + finally: + f.close() + + def mkdir(self, *args): + """ create & return the directory joined with args. """ + p = self.join(*args) + py.error.checked_call(os.mkdir, fspath(p)) + return p + + def write_binary(self, data, ensure=False): + """ write binary data into path. If ensure is True create + missing parent directories. + """ + if ensure: + self.dirpath().ensure(dir=1) + with self.open('wb') as f: + f.write(data) + + def write_text(self, data, encoding, ensure=False): + """ write text data into path using the specified encoding. + If ensure is True create missing parent directories. + """ + if ensure: + self.dirpath().ensure(dir=1) + with self.open('w', encoding=encoding) as f: + f.write(data) + + def write(self, data, mode='w', ensure=False): + """ write data into path. If ensure is True create + missing parent directories. 
+ """ + if ensure: + self.dirpath().ensure(dir=1) + if 'b' in mode: + if not py.builtin._isbytes(data): + raise ValueError("can only process bytes") + else: + if not py.builtin._istext(data): + if not py.builtin._isbytes(data): + data = str(data) + else: + data = py.builtin._totext(data, sys.getdefaultencoding()) + f = self.open(mode) + try: + f.write(data) + finally: + f.close() + + def _ensuredirs(self): + parent = self.dirpath() + if parent == self: + return self + if parent.check(dir=0): + parent._ensuredirs() + if self.check(dir=0): + try: + self.mkdir() + except py.error.EEXIST: + # race condition: file/dir created by another thread/process. + # complain if it is not a dir + if self.check(dir=0): + raise + return self + + def ensure(self, *args, **kwargs): + """ ensure that an args-joined path exists (by default as + a file). if you specify a keyword argument 'dir=True' + then the path is forced to be a directory path. + """ + p = self.join(*args) + if kwargs.get('dir', 0): + return p._ensuredirs() + else: + p.dirpath()._ensuredirs() + if not p.check(file=1): + p.open('w').close() + return p + + def stat(self, raising=True): + """ Return an os.stat() tuple. """ + if raising == True: + return Stat(self, py.error.checked_call(os.stat, self.strpath)) + try: + return Stat(self, os.stat(self.strpath)) + except KeyboardInterrupt: + raise + except Exception: + return None + + def lstat(self): + """ Return an os.lstat() tuple. """ + return Stat(self, py.error.checked_call(os.lstat, self.strpath)) + + def setmtime(self, mtime=None): + """ set modification time for the given path. if 'mtime' is None + (the default) then the file's mtime is set to current time. + + Note that the resolution for 'mtime' is platform dependent. 
+ """ + if mtime is None: + return py.error.checked_call(os.utime, self.strpath, mtime) + try: + return py.error.checked_call(os.utime, self.strpath, (-1, mtime)) + except py.error.EINVAL: + return py.error.checked_call(os.utime, self.strpath, (self.atime(), mtime)) + + def chdir(self): + """ change directory to self and return old current directory """ + try: + old = self.__class__() + except py.error.ENOENT: + old = None + py.error.checked_call(os.chdir, self.strpath) + return old + + + @contextmanager + def as_cwd(self): + """ return context manager which changes to current dir during the + managed "with" context. On __enter__ it returns the old dir. + """ + old = self.chdir() + try: + yield old + finally: + old.chdir() + + def realpath(self): + """ return a new path which contains no symbolic links.""" + return self.__class__(os.path.realpath(self.strpath)) + + def atime(self): + """ return last access time of the path. """ + return self.stat().atime + + def __repr__(self): + return 'local(%r)' % self.strpath + + def __str__(self): + """ return string representation of the Path. """ + return self.strpath + + def chmod(self, mode, rec=0): + """ change permissions to the given mode. If mode is an + integer it directly encodes the os-specific modes. + if rec is True perform recursively. + """ + if not isinstance(mode, int): + raise TypeError("mode %r must be an integer" % (mode,)) + if rec: + for x in self.visit(rec=rec): + py.error.checked_call(os.chmod, str(x), mode) + py.error.checked_call(os.chmod, self.strpath, mode) + + def pypkgpath(self): + """ return the Python package path by looking for the last + directory upwards which still contains an __init__.py. + Return None if a pkgpath can not be determined. 
+ """ + pkgpath = None + for parent in self.parts(reverse=True): + if parent.isdir(): + if not parent.join('__init__.py').exists(): + break + if not isimportable(parent.basename): + break + pkgpath = parent + return pkgpath + + def _ensuresyspath(self, ensuremode, path): + if ensuremode: + s = str(path) + if ensuremode == "append": + if s not in sys.path: + sys.path.append(s) + else: + if s != sys.path[0]: + sys.path.insert(0, s) + + def pyimport(self, modname=None, ensuresyspath=True): + """ return path as an imported python module. + + If modname is None, look for the containing package + and construct an according module name. + The module will be put/looked up in sys.modules. + if ensuresyspath is True then the root dir for importing + the file (taking __init__.py files into account) will + be prepended to sys.path if it isn't there already. + If ensuresyspath=="append" the root dir will be appended + if it isn't already contained in sys.path. + if ensuresyspath is False no modification of syspath happens. + """ + if not self.check(): + raise py.error.ENOENT(self) + + pkgpath = None + if modname is None: + pkgpath = self.pypkgpath() + if pkgpath is not None: + pkgroot = pkgpath.dirpath() + names = self.new(ext="").relto(pkgroot).split(self.sep) + if names[-1] == "__init__": + names.pop() + modname = ".".join(names) + else: + pkgroot = self.dirpath() + modname = self.purebasename + + self._ensuresyspath(ensuresyspath, pkgroot) + __import__(modname) + mod = sys.modules[modname] + if self.basename == "__init__.py": + return mod # we don't check anything as we might + # we in a namespace package ... 
too icky to check + modfile = mod.__file__ + if modfile[-4:] in ('.pyc', '.pyo'): + modfile = modfile[:-1] + elif modfile.endswith('$py.class'): + modfile = modfile[:-9] + '.py' + if modfile.endswith(os.path.sep + "__init__.py"): + if self.basename != "__init__.py": + modfile = modfile[:-12] + try: + issame = self.samefile(modfile) + except py.error.ENOENT: + issame = False + if not issame: + raise self.ImportMismatchError(modname, modfile, self) + return mod + else: + try: + return sys.modules[modname] + except KeyError: + # we have a custom modname, do a pseudo-import + mod = py.std.types.ModuleType(modname) + mod.__file__ = str(self) + sys.modules[modname] = mod + try: + py.builtin.execfile(str(self), mod.__dict__) + except: + del sys.modules[modname] + raise + return mod + + def sysexec(self, *argv, **popen_opts): + """ return stdout text from executing a system child process, + where the 'self' path points to executable. + The process is directly invoked and not through a system shell. + """ + from subprocess import Popen, PIPE + argv = map_as_list(str, argv) + popen_opts['stdout'] = popen_opts['stderr'] = PIPE + proc = Popen([str(self)] + argv, **popen_opts) + stdout, stderr = proc.communicate() + ret = proc.wait() + if py.builtin._isbytes(stdout): + stdout = py.builtin._totext(stdout, sys.getdefaultencoding()) + if ret != 0: + if py.builtin._isbytes(stderr): + stderr = py.builtin._totext(stderr, sys.getdefaultencoding()) + raise py.process.cmdexec.Error(ret, ret, str(self), + stdout, stderr,) + return stdout + + def sysfind(cls, name, checker=None, paths=None): + """ return a path object found by looking at the systems + underlying PATH specification. If the checker is not None + it will be invoked to filter matching paths. If a binary + cannot be found, None is returned + Note: This is probably not working on plain win32 systems + but may work on cygwin. 
+ """ + if isabs(name): + p = py.path.local(name) + if p.check(file=1): + return p + else: + if paths is None: + if iswin32: + paths = py.std.os.environ['Path'].split(';') + if '' not in paths and '.' not in paths: + paths.append('.') + try: + systemroot = os.environ['SYSTEMROOT'] + except KeyError: + pass + else: + paths = [re.sub('%SystemRoot%', systemroot, path) + for path in paths] + else: + paths = py.std.os.environ['PATH'].split(':') + tryadd = [] + if iswin32: + tryadd += os.environ['PATHEXT'].split(os.pathsep) + tryadd.append("") + + for x in paths: + for addext in tryadd: + p = py.path.local(x).join(name, abs=True) + addext + try: + if p.check(file=1): + if checker: + if not checker(p): + continue + return p + except py.error.EACCES: + pass + return None + sysfind = classmethod(sysfind) + + def _gethomedir(cls): + try: + x = os.environ['HOME'] + except KeyError: + try: + x = os.environ["HOMEDRIVE"] + os.environ['HOMEPATH'] + except KeyError: + return None + return cls(x) + _gethomedir = classmethod(_gethomedir) + + #""" + #special class constructors for local filesystem paths + #""" + def get_temproot(cls): + """ return the system's temporary directory + (where tempfiles are usually created in) + """ + return py.path.local(py.std.tempfile.gettempdir()) + get_temproot = classmethod(get_temproot) + + def mkdtemp(cls, rootdir=None): + """ return a Path object pointing to a fresh new temporary directory + (which we created ourself). + """ + import tempfile + if rootdir is None: + rootdir = cls.get_temproot() + return cls(py.error.checked_call(tempfile.mkdtemp, dir=str(rootdir))) + mkdtemp = classmethod(mkdtemp) + + def make_numbered_dir(cls, prefix='session-', rootdir=None, keep=3, + lock_timeout = 172800): # two days + """ return unique directory with a number greater than the current + maximum one. The number is assumed to start directly after prefix. + if keep is true directories with a number less than (maxnum-keep) + will be removed. 
+ """ + if rootdir is None: + rootdir = cls.get_temproot() + + nprefix = normcase(prefix) + def parse_num(path): + """ parse the number out of a path (if it matches the prefix) """ + nbasename = normcase(path.basename) + if nbasename.startswith(nprefix): + try: + return int(nbasename[len(nprefix):]) + except ValueError: + pass + + # compute the maximum number currently in use with the + # prefix + lastmax = None + while True: + maxnum = -1 + for path in rootdir.listdir(): + num = parse_num(path) + if num is not None: + maxnum = max(maxnum, num) + + # make the new directory + try: + udir = rootdir.mkdir(prefix + str(maxnum+1)) + except py.error.EEXIST: + # race condition: another thread/process created the dir + # in the meantime. Try counting again + if lastmax == maxnum: + raise + lastmax = maxnum + continue + break + + # put a .lock file in the new directory that will be removed at + # process exit + if lock_timeout: + lockfile = udir.join('.lock') + mypid = os.getpid() + if hasattr(lockfile, 'mksymlinkto'): + lockfile.mksymlinkto(str(mypid)) + else: + lockfile.write(str(mypid)) + def try_remove_lockfile(): + # in a fork() situation, only the last process should + # remove the .lock, otherwise the other processes run the + # risk of seeing their temporary dir disappear. For now + # we remove the .lock in the parent only (i.e. we assume + # that the children finish before the parent). 
+ if os.getpid() != mypid: + return + try: + lockfile.remove() + except py.error.Error: + pass + atexit.register(try_remove_lockfile) + + # prune old directories + if keep: + for path in rootdir.listdir(): + num = parse_num(path) + if num is not None and num <= (maxnum - keep): + lf = path.join('.lock') + try: + t1 = lf.lstat().mtime + t2 = lockfile.lstat().mtime + if not lock_timeout or abs(t2-t1) < lock_timeout: + continue # skip directories still locked + except py.error.Error: + pass # assume that it means that there is no 'lf' + try: + path.remove(rec=1) + except KeyboardInterrupt: + raise + except: # this might be py.error.Error, WindowsError ... + pass + + # make link... + try: + username = os.environ['USER'] #linux, et al + except KeyError: + try: + username = os.environ['USERNAME'] #windows + except KeyError: + username = 'current' + + src = str(udir) + dest = src[:src.rfind('-')] + '-' + username + try: + os.unlink(dest) + except OSError: + pass + try: + os.symlink(src, dest) + except (OSError, AttributeError, NotImplementedError): + pass + + return udir + make_numbered_dir = classmethod(make_numbered_dir) + + +def copymode(src, dest): + """ copy permission from src to dst. 
""" + py.std.shutil.copymode(src, dest) + +def copystat(src, dest): + """ copy permission, last modification time, last access time, and flags from src to dst.""" + py.std.shutil.copystat(str(src), str(dest)) + +def copychunked(src, dest): + chunksize = 524288 # half a meg of bytes + fsrc = src.open('rb') + try: + fdest = dest.open('wb') + try: + while 1: + buf = fsrc.read(chunksize) + if not buf: + break + fdest.write(buf) + finally: + fdest.close() + finally: + fsrc.close() + +def isimportable(name): + if name and (name[0].isalpha() or name[0] == '_'): + name = name.replace("_", '') + return not name or name.isalnum() diff --git a/third_party/python/py/py/_path/svnurl.py b/third_party/python/py/py/_path/svnurl.py index 78d71317ac07f..5f45fa2f831e8 100644 --- a/third_party/python/py/py/_path/svnurl.py +++ b/third_party/python/py/py/_path/svnurl.py @@ -1,380 +1,380 @@ -""" -module defining a subversion path object based on the external -command 'svn'. This modules aims to work with svn 1.3 and higher -but might also interact well with earlier versions. -""" - -import os, sys, time, re -import py -from py import path, process -from py._path import common -from py._path import svnwc as svncommon -from py._path.cacheutil import BuildcostAccessCache, AgingCache - -DEBUG=False - -class SvnCommandPath(svncommon.SvnPathBase): - """ path implementation that offers access to (possibly remote) subversion - repositories. 
""" - - _lsrevcache = BuildcostAccessCache(maxentries=128) - _lsnorevcache = AgingCache(maxentries=1000, maxseconds=60.0) - - def __new__(cls, path, rev=None, auth=None): - self = object.__new__(cls) - if isinstance(path, cls): - rev = path.rev - auth = path.auth - path = path.strpath - svncommon.checkbadchars(path) - path = path.rstrip('/') - self.strpath = path - self.rev = rev - self.auth = auth - return self - - def __repr__(self): - if self.rev == -1: - return 'svnurl(%r)' % self.strpath - else: - return 'svnurl(%r, %r)' % (self.strpath, self.rev) - - def _svnwithrev(self, cmd, *args): - """ execute an svn command, append our own url and revision """ - if self.rev is None: - return self._svnwrite(cmd, *args) - else: - args = ['-r', self.rev] + list(args) - return self._svnwrite(cmd, *args) - - def _svnwrite(self, cmd, *args): - """ execute an svn command, append our own url """ - l = ['svn %s' % cmd] - args = ['"%s"' % self._escape(item) for item in args] - l.extend(args) - l.append('"%s"' % self._encodedurl()) - # fixing the locale because we can't otherwise parse - string = " ".join(l) - if DEBUG: - print("execing %s" % string) - out = self._svncmdexecauth(string) - return out - - def _svncmdexecauth(self, cmd): - """ execute an svn command 'as is' """ - cmd = svncommon.fixlocale() + cmd - if self.auth is not None: - cmd += ' ' + self.auth.makecmdoptions() - return self._cmdexec(cmd) - - def _cmdexec(self, cmd): - try: - out = process.cmdexec(cmd) - except py.process.cmdexec.Error: - e = sys.exc_info()[1] - if (e.err.find('File Exists') != -1 or - e.err.find('File already exists') != -1): - raise py.error.EEXIST(self) - raise - return out - - def _svnpopenauth(self, cmd): - """ execute an svn command, return a pipe for reading stdin """ - cmd = svncommon.fixlocale() + cmd - if self.auth is not None: - cmd += ' ' + self.auth.makecmdoptions() - return self._popen(cmd) - - def _popen(self, cmd): - return os.popen(cmd) - - def _encodedurl(self): - return 
self._escape(self.strpath) - - def _norev_delentry(self, path): - auth = self.auth and self.auth.makecmdoptions() or None - self._lsnorevcache.delentry((str(path), auth)) - - def open(self, mode='r'): - """ return an opened file with the given mode. """ - if mode not in ("r", "rU",): - raise ValueError("mode %r not supported" % (mode,)) - assert self.check(file=1) # svn cat returns an empty file otherwise - if self.rev is None: - return self._svnpopenauth('svn cat "%s"' % ( - self._escape(self.strpath), )) - else: - return self._svnpopenauth('svn cat -r %s "%s"' % ( - self.rev, self._escape(self.strpath))) - - def dirpath(self, *args, **kwargs): - """ return the directory path of the current path joined - with any given path arguments. - """ - l = self.strpath.split(self.sep) - if len(l) < 4: - raise py.error.EINVAL(self, "base is not valid") - elif len(l) == 4: - return self.join(*args, **kwargs) - else: - return self.new(basename='').join(*args, **kwargs) - - # modifying methods (cache must be invalidated) - def mkdir(self, *args, **kwargs): - """ create & return the directory joined with args. - pass a 'msg' keyword argument to set the commit message. - """ - commit_msg = kwargs.get('msg', "mkdir by py lib invocation") - createpath = self.join(*args) - createpath._svnwrite('mkdir', '-m', commit_msg) - self._norev_delentry(createpath.dirpath()) - return createpath - - def copy(self, target, msg='copied by py lib invocation'): - """ copy path to target with checkin message msg.""" - if getattr(target, 'rev', None) is not None: - raise py.error.EINVAL(target, "revisions are immutable") - self._svncmdexecauth('svn copy -m "%s" "%s" "%s"' %(msg, - self._escape(self), self._escape(target))) - self._norev_delentry(target.dirpath()) - - def rename(self, target, msg="renamed by py lib invocation"): - """ rename this path to target with checkin message msg. 
""" - if getattr(self, 'rev', None) is not None: - raise py.error.EINVAL(self, "revisions are immutable") - self._svncmdexecauth('svn move -m "%s" --force "%s" "%s"' %( - msg, self._escape(self), self._escape(target))) - self._norev_delentry(self.dirpath()) - self._norev_delentry(self) - - def remove(self, rec=1, msg='removed by py lib invocation'): - """ remove a file or directory (or a directory tree if rec=1) with -checkin message msg.""" - if self.rev is not None: - raise py.error.EINVAL(self, "revisions are immutable") - self._svncmdexecauth('svn rm -m "%s" "%s"' %(msg, self._escape(self))) - self._norev_delentry(self.dirpath()) - - def export(self, topath): - """ export to a local path - - topath should not exist prior to calling this, returns a - py.path.local instance - """ - topath = py.path.local(topath) - args = ['"%s"' % (self._escape(self),), - '"%s"' % (self._escape(topath),)] - if self.rev is not None: - args = ['-r', str(self.rev)] + args - self._svncmdexecauth('svn export %s' % (' '.join(args),)) - return topath - - def ensure(self, *args, **kwargs): - """ ensure that an args-joined path exists (by default as - a file). If you specify a keyword argument 'dir=True' - then the path is forced to be a directory path. 
- """ - if getattr(self, 'rev', None) is not None: - raise py.error.EINVAL(self, "revisions are immutable") - target = self.join(*args) - dir = kwargs.get('dir', 0) - for x in target.parts(reverse=True): - if x.check(): - break - else: - raise py.error.ENOENT(target, "has not any valid base!") - if x == target: - if not x.check(dir=dir): - raise dir and py.error.ENOTDIR(x) or py.error.EISDIR(x) - return x - tocreate = target.relto(x) - basename = tocreate.split(self.sep, 1)[0] - tempdir = py.path.local.mkdtemp() - try: - tempdir.ensure(tocreate, dir=dir) - cmd = 'svn import -m "%s" "%s" "%s"' % ( - "ensure %s" % self._escape(tocreate), - self._escape(tempdir.join(basename)), - x.join(basename)._encodedurl()) - self._svncmdexecauth(cmd) - self._norev_delentry(x) - finally: - tempdir.remove() - return target - - # end of modifying methods - def _propget(self, name): - res = self._svnwithrev('propget', name) - return res[:-1] # strip trailing newline - - def _proplist(self): - res = self._svnwithrev('proplist') - lines = res.split('\n') - lines = [x.strip() for x in lines[1:]] - return svncommon.PropListDict(self, lines) - - def info(self): - """ return an Info structure with svn-provided information. 
""" - parent = self.dirpath() - nameinfo_seq = parent._listdir_nameinfo() - bn = self.basename - for name, info in nameinfo_seq: - if name == bn: - return info - raise py.error.ENOENT(self) - - - def _listdir_nameinfo(self): - """ return sequence of name-info directory entries of self """ - def builder(): - try: - res = self._svnwithrev('ls', '-v') - except process.cmdexec.Error: - e = sys.exc_info()[1] - if e.err.find('non-existent in that revision') != -1: - raise py.error.ENOENT(self, e.err) - elif e.err.find("E200009:") != -1: - raise py.error.ENOENT(self, e.err) - elif e.err.find('File not found') != -1: - raise py.error.ENOENT(self, e.err) - elif e.err.find('not part of a repository')!=-1: - raise py.error.ENOENT(self, e.err) - elif e.err.find('Unable to open')!=-1: - raise py.error.ENOENT(self, e.err) - elif e.err.lower().find('method not allowed')!=-1: - raise py.error.EACCES(self, e.err) - raise py.error.Error(e.err) - lines = res.split('\n') - nameinfo_seq = [] - for lsline in lines: - if lsline: - info = InfoSvnCommand(lsline) - if info._name != '.': # svn 1.5 produces '.' dirs, - nameinfo_seq.append((info._name, info)) - nameinfo_seq.sort() - return nameinfo_seq - auth = self.auth and self.auth.makecmdoptions() or None - if self.rev is not None: - return self._lsrevcache.getorbuild((self.strpath, self.rev, auth), - builder) - else: - return self._lsnorevcache.getorbuild((self.strpath, auth), - builder) - - def listdir(self, fil=None, sort=None): - """ list directory contents, possibly filter by the given fil func - and possibly sorted. 
- """ - if isinstance(fil, str): - fil = common.FNMatcher(fil) - nameinfo_seq = self._listdir_nameinfo() - if len(nameinfo_seq) == 1: - name, info = nameinfo_seq[0] - if name == self.basename and info.kind == 'file': - #if not self.check(dir=1): - raise py.error.ENOTDIR(self) - paths = [self.join(name) for (name, info) in nameinfo_seq] - if fil: - paths = [x for x in paths if fil(x)] - self._sortlist(paths, sort) - return paths - - - def log(self, rev_start=None, rev_end=1, verbose=False): - """ return a list of LogEntry instances for this path. -rev_start is the starting revision (defaulting to the first one). -rev_end is the last revision (defaulting to HEAD). -if verbose is True, then the LogEntry instances also know which files changed. -""" - assert self.check() #make it simpler for the pipe - rev_start = rev_start is None and "HEAD" or rev_start - rev_end = rev_end is None and "HEAD" or rev_end - - if rev_start == "HEAD" and rev_end == 1: - rev_opt = "" - else: - rev_opt = "-r %s:%s" % (rev_start, rev_end) - verbose_opt = verbose and "-v" or "" - xmlpipe = self._svnpopenauth('svn log --xml %s %s "%s"' % - (rev_opt, verbose_opt, self.strpath)) - from xml.dom import minidom - tree = minidom.parse(xmlpipe) - result = [] - for logentry in filter(None, tree.firstChild.childNodes): - if logentry.nodeType == logentry.ELEMENT_NODE: - result.append(svncommon.LogEntry(logentry)) - return result - -#01234567890123456789012345678901234567890123467 -# 2256 hpk 165 Nov 24 17:55 __init__.py -# XXX spotted by Guido, SVN 1.3.0 has different aligning, breaks the code!!! -# 1312 johnny 1627 May 05 14:32 test_decorators.py -# -class InfoSvnCommand: - # the '0?' part in the middle is an indication of whether the resource is - # locked, see 'svn help ls' - lspattern = re.compile( - r'^ *(?P\d+) +(?P.+?) +(0? *(?P\d+))? ' - '*(?P\w+ +\d{2} +[\d:]+) +(?P.*)$') - def __init__(self, line): - # this is a typical line from 'svn ls http://...' 
- #_ 1127 jum 0 Jul 13 15:28 branch/ - match = self.lspattern.match(line) - data = match.groupdict() - self._name = data['file'] - if self._name[-1] == '/': - self._name = self._name[:-1] - self.kind = 'dir' - else: - self.kind = 'file' - #self.has_props = l.pop(0) == 'P' - self.created_rev = int(data['rev']) - self.last_author = data['author'] - self.size = data['size'] and int(data['size']) or 0 - self.mtime = parse_time_with_missing_year(data['date']) - self.time = self.mtime * 1000000 - - def __eq__(self, other): - return self.__dict__ == other.__dict__ - - -#____________________________________________________ -# -# helper functions -#____________________________________________________ -def parse_time_with_missing_year(timestr): - """ analyze the time part from a single line of "svn ls -v" - the svn output doesn't show the year makes the 'timestr' - ambigous. - """ - import calendar - t_now = time.gmtime() - - tparts = timestr.split() - month = time.strptime(tparts.pop(0), '%b')[1] - day = time.strptime(tparts.pop(0), '%d')[2] - last = tparts.pop(0) # year or hour:minute - try: - if ":" in last: - raise ValueError() - year = time.strptime(last, '%Y')[0] - hour = minute = 0 - except ValueError: - hour, minute = time.strptime(last, '%H:%M')[3:5] - year = t_now[0] - - t_result = (year, month, day, hour, minute, 0,0,0,0) - if t_result > t_now: - year -= 1 - t_result = (year, month, day, hour, minute, 0,0,0,0) - return calendar.timegm(t_result) - -class PathEntry: - def __init__(self, ppart): - self.strpath = ppart.firstChild.nodeValue.encode('UTF-8') - self.action = ppart.getAttribute('action').encode('UTF-8') - if self.action == 'A': - self.copyfrom_path = ppart.getAttribute('copyfrom-path').encode('UTF-8') - if self.copyfrom_path: - self.copyfrom_rev = int(ppart.getAttribute('copyfrom-rev')) - +""" +module defining a subversion path object based on the external +command 'svn'. 
This modules aims to work with svn 1.3 and higher +but might also interact well with earlier versions. +""" + +import os, sys, time, re +import py +from py import path, process +from py._path import common +from py._path import svnwc as svncommon +from py._path.cacheutil import BuildcostAccessCache, AgingCache + +DEBUG=False + +class SvnCommandPath(svncommon.SvnPathBase): + """ path implementation that offers access to (possibly remote) subversion + repositories. """ + + _lsrevcache = BuildcostAccessCache(maxentries=128) + _lsnorevcache = AgingCache(maxentries=1000, maxseconds=60.0) + + def __new__(cls, path, rev=None, auth=None): + self = object.__new__(cls) + if isinstance(path, cls): + rev = path.rev + auth = path.auth + path = path.strpath + svncommon.checkbadchars(path) + path = path.rstrip('/') + self.strpath = path + self.rev = rev + self.auth = auth + return self + + def __repr__(self): + if self.rev == -1: + return 'svnurl(%r)' % self.strpath + else: + return 'svnurl(%r, %r)' % (self.strpath, self.rev) + + def _svnwithrev(self, cmd, *args): + """ execute an svn command, append our own url and revision """ + if self.rev is None: + return self._svnwrite(cmd, *args) + else: + args = ['-r', self.rev] + list(args) + return self._svnwrite(cmd, *args) + + def _svnwrite(self, cmd, *args): + """ execute an svn command, append our own url """ + l = ['svn %s' % cmd] + args = ['"%s"' % self._escape(item) for item in args] + l.extend(args) + l.append('"%s"' % self._encodedurl()) + # fixing the locale because we can't otherwise parse + string = " ".join(l) + if DEBUG: + print("execing %s" % string) + out = self._svncmdexecauth(string) + return out + + def _svncmdexecauth(self, cmd): + """ execute an svn command 'as is' """ + cmd = svncommon.fixlocale() + cmd + if self.auth is not None: + cmd += ' ' + self.auth.makecmdoptions() + return self._cmdexec(cmd) + + def _cmdexec(self, cmd): + try: + out = process.cmdexec(cmd) + except py.process.cmdexec.Error: + e = 
sys.exc_info()[1] + if (e.err.find('File Exists') != -1 or + e.err.find('File already exists') != -1): + raise py.error.EEXIST(self) + raise + return out + + def _svnpopenauth(self, cmd): + """ execute an svn command, return a pipe for reading stdin """ + cmd = svncommon.fixlocale() + cmd + if self.auth is not None: + cmd += ' ' + self.auth.makecmdoptions() + return self._popen(cmd) + + def _popen(self, cmd): + return os.popen(cmd) + + def _encodedurl(self): + return self._escape(self.strpath) + + def _norev_delentry(self, path): + auth = self.auth and self.auth.makecmdoptions() or None + self._lsnorevcache.delentry((str(path), auth)) + + def open(self, mode='r'): + """ return an opened file with the given mode. """ + if mode not in ("r", "rU",): + raise ValueError("mode %r not supported" % (mode,)) + assert self.check(file=1) # svn cat returns an empty file otherwise + if self.rev is None: + return self._svnpopenauth('svn cat "%s"' % ( + self._escape(self.strpath), )) + else: + return self._svnpopenauth('svn cat -r %s "%s"' % ( + self.rev, self._escape(self.strpath))) + + def dirpath(self, *args, **kwargs): + """ return the directory path of the current path joined + with any given path arguments. + """ + l = self.strpath.split(self.sep) + if len(l) < 4: + raise py.error.EINVAL(self, "base is not valid") + elif len(l) == 4: + return self.join(*args, **kwargs) + else: + return self.new(basename='').join(*args, **kwargs) + + # modifying methods (cache must be invalidated) + def mkdir(self, *args, **kwargs): + """ create & return the directory joined with args. + pass a 'msg' keyword argument to set the commit message. 
+ """ + commit_msg = kwargs.get('msg', "mkdir by py lib invocation") + createpath = self.join(*args) + createpath._svnwrite('mkdir', '-m', commit_msg) + self._norev_delentry(createpath.dirpath()) + return createpath + + def copy(self, target, msg='copied by py lib invocation'): + """ copy path to target with checkin message msg.""" + if getattr(target, 'rev', None) is not None: + raise py.error.EINVAL(target, "revisions are immutable") + self._svncmdexecauth('svn copy -m "%s" "%s" "%s"' %(msg, + self._escape(self), self._escape(target))) + self._norev_delentry(target.dirpath()) + + def rename(self, target, msg="renamed by py lib invocation"): + """ rename this path to target with checkin message msg. """ + if getattr(self, 'rev', None) is not None: + raise py.error.EINVAL(self, "revisions are immutable") + self._svncmdexecauth('svn move -m "%s" --force "%s" "%s"' %( + msg, self._escape(self), self._escape(target))) + self._norev_delentry(self.dirpath()) + self._norev_delentry(self) + + def remove(self, rec=1, msg='removed by py lib invocation'): + """ remove a file or directory (or a directory tree if rec=1) with +checkin message msg.""" + if self.rev is not None: + raise py.error.EINVAL(self, "revisions are immutable") + self._svncmdexecauth('svn rm -m "%s" "%s"' %(msg, self._escape(self))) + self._norev_delentry(self.dirpath()) + + def export(self, topath): + """ export to a local path + + topath should not exist prior to calling this, returns a + py.path.local instance + """ + topath = py.path.local(topath) + args = ['"%s"' % (self._escape(self),), + '"%s"' % (self._escape(topath),)] + if self.rev is not None: + args = ['-r', str(self.rev)] + args + self._svncmdexecauth('svn export %s' % (' '.join(args),)) + return topath + + def ensure(self, *args, **kwargs): + """ ensure that an args-joined path exists (by default as + a file). If you specify a keyword argument 'dir=True' + then the path is forced to be a directory path. 
+ """ + if getattr(self, 'rev', None) is not None: + raise py.error.EINVAL(self, "revisions are immutable") + target = self.join(*args) + dir = kwargs.get('dir', 0) + for x in target.parts(reverse=True): + if x.check(): + break + else: + raise py.error.ENOENT(target, "has not any valid base!") + if x == target: + if not x.check(dir=dir): + raise dir and py.error.ENOTDIR(x) or py.error.EISDIR(x) + return x + tocreate = target.relto(x) + basename = tocreate.split(self.sep, 1)[0] + tempdir = py.path.local.mkdtemp() + try: + tempdir.ensure(tocreate, dir=dir) + cmd = 'svn import -m "%s" "%s" "%s"' % ( + "ensure %s" % self._escape(tocreate), + self._escape(tempdir.join(basename)), + x.join(basename)._encodedurl()) + self._svncmdexecauth(cmd) + self._norev_delentry(x) + finally: + tempdir.remove() + return target + + # end of modifying methods + def _propget(self, name): + res = self._svnwithrev('propget', name) + return res[:-1] # strip trailing newline + + def _proplist(self): + res = self._svnwithrev('proplist') + lines = res.split('\n') + lines = [x.strip() for x in lines[1:]] + return svncommon.PropListDict(self, lines) + + def info(self): + """ return an Info structure with svn-provided information. 
""" + parent = self.dirpath() + nameinfo_seq = parent._listdir_nameinfo() + bn = self.basename + for name, info in nameinfo_seq: + if name == bn: + return info + raise py.error.ENOENT(self) + + + def _listdir_nameinfo(self): + """ return sequence of name-info directory entries of self """ + def builder(): + try: + res = self._svnwithrev('ls', '-v') + except process.cmdexec.Error: + e = sys.exc_info()[1] + if e.err.find('non-existent in that revision') != -1: + raise py.error.ENOENT(self, e.err) + elif e.err.find("E200009:") != -1: + raise py.error.ENOENT(self, e.err) + elif e.err.find('File not found') != -1: + raise py.error.ENOENT(self, e.err) + elif e.err.find('not part of a repository')!=-1: + raise py.error.ENOENT(self, e.err) + elif e.err.find('Unable to open')!=-1: + raise py.error.ENOENT(self, e.err) + elif e.err.lower().find('method not allowed')!=-1: + raise py.error.EACCES(self, e.err) + raise py.error.Error(e.err) + lines = res.split('\n') + nameinfo_seq = [] + for lsline in lines: + if lsline: + info = InfoSvnCommand(lsline) + if info._name != '.': # svn 1.5 produces '.' dirs, + nameinfo_seq.append((info._name, info)) + nameinfo_seq.sort() + return nameinfo_seq + auth = self.auth and self.auth.makecmdoptions() or None + if self.rev is not None: + return self._lsrevcache.getorbuild((self.strpath, self.rev, auth), + builder) + else: + return self._lsnorevcache.getorbuild((self.strpath, auth), + builder) + + def listdir(self, fil=None, sort=None): + """ list directory contents, possibly filter by the given fil func + and possibly sorted. 
+ """ + if isinstance(fil, str): + fil = common.FNMatcher(fil) + nameinfo_seq = self._listdir_nameinfo() + if len(nameinfo_seq) == 1: + name, info = nameinfo_seq[0] + if name == self.basename and info.kind == 'file': + #if not self.check(dir=1): + raise py.error.ENOTDIR(self) + paths = [self.join(name) for (name, info) in nameinfo_seq] + if fil: + paths = [x for x in paths if fil(x)] + self._sortlist(paths, sort) + return paths + + + def log(self, rev_start=None, rev_end=1, verbose=False): + """ return a list of LogEntry instances for this path. +rev_start is the starting revision (defaulting to the first one). +rev_end is the last revision (defaulting to HEAD). +if verbose is True, then the LogEntry instances also know which files changed. +""" + assert self.check() #make it simpler for the pipe + rev_start = rev_start is None and "HEAD" or rev_start + rev_end = rev_end is None and "HEAD" or rev_end + + if rev_start == "HEAD" and rev_end == 1: + rev_opt = "" + else: + rev_opt = "-r %s:%s" % (rev_start, rev_end) + verbose_opt = verbose and "-v" or "" + xmlpipe = self._svnpopenauth('svn log --xml %s %s "%s"' % + (rev_opt, verbose_opt, self.strpath)) + from xml.dom import minidom + tree = minidom.parse(xmlpipe) + result = [] + for logentry in filter(None, tree.firstChild.childNodes): + if logentry.nodeType == logentry.ELEMENT_NODE: + result.append(svncommon.LogEntry(logentry)) + return result + +#01234567890123456789012345678901234567890123467 +# 2256 hpk 165 Nov 24 17:55 __init__.py +# XXX spotted by Guido, SVN 1.3.0 has different aligning, breaks the code!!! +# 1312 johnny 1627 May 05 14:32 test_decorators.py +# +class InfoSvnCommand: + # the '0?' part in the middle is an indication of whether the resource is + # locked, see 'svn help ls' + lspattern = re.compile( + r'^ *(?P\d+) +(?P.+?) +(0? *(?P\d+))? ' + r'*(?P\w+ +\d{2} +[\d:]+) +(?P.*)$') + def __init__(self, line): + # this is a typical line from 'svn ls http://...' 
+ #_ 1127 jum 0 Jul 13 15:28 branch/ + match = self.lspattern.match(line) + data = match.groupdict() + self._name = data['file'] + if self._name[-1] == '/': + self._name = self._name[:-1] + self.kind = 'dir' + else: + self.kind = 'file' + #self.has_props = l.pop(0) == 'P' + self.created_rev = int(data['rev']) + self.last_author = data['author'] + self.size = data['size'] and int(data['size']) or 0 + self.mtime = parse_time_with_missing_year(data['date']) + self.time = self.mtime * 1000000 + + def __eq__(self, other): + return self.__dict__ == other.__dict__ + + +#____________________________________________________ +# +# helper functions +#____________________________________________________ +def parse_time_with_missing_year(timestr): + """ analyze the time part from a single line of "svn ls -v" + the svn output doesn't show the year makes the 'timestr' + ambigous. + """ + import calendar + t_now = time.gmtime() + + tparts = timestr.split() + month = time.strptime(tparts.pop(0), '%b')[1] + day = time.strptime(tparts.pop(0), '%d')[2] + last = tparts.pop(0) # year or hour:minute + try: + if ":" in last: + raise ValueError() + year = time.strptime(last, '%Y')[0] + hour = minute = 0 + except ValueError: + hour, minute = time.strptime(last, '%H:%M')[3:5] + year = t_now[0] + + t_result = (year, month, day, hour, minute, 0,0,0,0) + if t_result > t_now: + year -= 1 + t_result = (year, month, day, hour, minute, 0,0,0,0) + return calendar.timegm(t_result) + +class PathEntry: + def __init__(self, ppart): + self.strpath = ppart.firstChild.nodeValue.encode('UTF-8') + self.action = ppart.getAttribute('action').encode('UTF-8') + if self.action == 'A': + self.copyfrom_path = ppart.getAttribute('copyfrom-path').encode('UTF-8') + if self.copyfrom_path: + self.copyfrom_rev = int(ppart.getAttribute('copyfrom-rev')) + diff --git a/third_party/python/py/py/_path/svnwc.py b/third_party/python/py/py/_path/svnwc.py index 00d3b4bbaf362..07233b0bb79d1 100644 --- 
a/third_party/python/py/py/_path/svnwc.py +++ b/third_party/python/py/py/_path/svnwc.py @@ -1,1240 +1,1240 @@ -""" -svn-Command based Implementation of a Subversion WorkingCopy Path. - - SvnWCCommandPath is the main class. - -""" - -import os, sys, time, re, calendar -import py -import subprocess -from py._path import common - -#----------------------------------------------------------- -# Caching latest repository revision and repo-paths -# (getting them is slow with the current implementations) -# -# XXX make mt-safe -#----------------------------------------------------------- - -class cache: - proplist = {} - info = {} - entries = {} - prop = {} - -class RepoEntry: - def __init__(self, url, rev, timestamp): - self.url = url - self.rev = rev - self.timestamp = timestamp - - def __str__(self): - return "repo: %s;%s %s" %(self.url, self.rev, self.timestamp) - -class RepoCache: - """ The Repocache manages discovered repository paths - and their revisions. If inside a timeout the cache - will even return the revision of the root. 
- """ - timeout = 20 # seconds after which we forget that we know the last revision - - def __init__(self): - self.repos = [] - - def clear(self): - self.repos = [] - - def put(self, url, rev, timestamp=None): - if rev is None: - return - if timestamp is None: - timestamp = time.time() - - for entry in self.repos: - if url == entry.url: - entry.timestamp = timestamp - entry.rev = rev - #print "set repo", entry - break - else: - entry = RepoEntry(url, rev, timestamp) - self.repos.append(entry) - #print "appended repo", entry - - def get(self, url): - now = time.time() - for entry in self.repos: - if url.startswith(entry.url): - if now < entry.timestamp + self.timeout: - #print "returning immediate Etrny", entry - return entry.url, entry.rev - return entry.url, -1 - return url, -1 - -repositories = RepoCache() - - -# svn support code - -ALLOWED_CHARS = "_ -/\\=$.~+%" #add characters as necessary when tested -if sys.platform == "win32": - ALLOWED_CHARS += ":" -ALLOWED_CHARS_HOST = ALLOWED_CHARS + '@:' - -def _getsvnversion(ver=[]): - try: - return ver[0] - except IndexError: - v = py.process.cmdexec("svn -q --version") - v.strip() - v = '.'.join(v.split('.')[:2]) - ver.append(v) - return v - -def _escape_helper(text): - text = str(text) - if py.std.sys.platform != 'win32': - text = str(text).replace('$', '\\$') - return text - -def _check_for_bad_chars(text, allowed_chars=ALLOWED_CHARS): - for c in str(text): - if c.isalnum(): - continue - if c in allowed_chars: - continue - return True - return False - -def checkbadchars(url): - # (hpk) not quite sure about the exact purpose, guido w.? 
- proto, uri = url.split("://", 1) - if proto != "file": - host, uripath = uri.split('/', 1) - # only check for bad chars in the non-protocol parts - if (_check_for_bad_chars(host, ALLOWED_CHARS_HOST) \ - or _check_for_bad_chars(uripath, ALLOWED_CHARS)): - raise ValueError("bad char in %r" % (url, )) - - -#_______________________________________________________________ - -class SvnPathBase(common.PathBase): - """ Base implementation for SvnPath implementations. """ - sep = '/' - - def _geturl(self): - return self.strpath - url = property(_geturl, None, None, "url of this svn-path.") - - def __str__(self): - """ return a string representation (including rev-number) """ - return self.strpath - - def __hash__(self): - return hash(self.strpath) - - def new(self, **kw): - """ create a modified version of this path. A 'rev' argument - indicates a new revision. - the following keyword arguments modify various path parts:: - - http://host.com/repo/path/file.ext - |-----------------------| dirname - |------| basename - |--| purebasename - |--| ext - """ - obj = object.__new__(self.__class__) - obj.rev = kw.get('rev', self.rev) - obj.auth = kw.get('auth', self.auth) - dirname, basename, purebasename, ext = self._getbyspec( - "dirname,basename,purebasename,ext") - if 'basename' in kw: - if 'purebasename' in kw or 'ext' in kw: - raise ValueError("invalid specification %r" % kw) - else: - pb = kw.setdefault('purebasename', purebasename) - ext = kw.setdefault('ext', ext) - if ext and not ext.startswith('.'): - ext = '.' + ext - kw['basename'] = pb + ext - - kw.setdefault('dirname', dirname) - kw.setdefault('sep', self.sep) - if kw['basename']: - obj.strpath = "%(dirname)s%(sep)s%(basename)s" % kw - else: - obj.strpath = "%(dirname)s" % kw - return obj - - def _getbyspec(self, spec): - """ get specified parts of the path. 'arg' is a string - with comma separated path parts. The parts are returned - in exactly the order of the specification. 
- - you may specify the following parts: - - http://host.com/repo/path/file.ext - |-----------------------| dirname - |------| basename - |--| purebasename - |--| ext - """ - res = [] - parts = self.strpath.split(self.sep) - for name in spec.split(','): - name = name.strip() - if name == 'dirname': - res.append(self.sep.join(parts[:-1])) - elif name == 'basename': - res.append(parts[-1]) - else: - basename = parts[-1] - i = basename.rfind('.') - if i == -1: - purebasename, ext = basename, '' - else: - purebasename, ext = basename[:i], basename[i:] - if name == 'purebasename': - res.append(purebasename) - elif name == 'ext': - res.append(ext) - else: - raise NameError("Don't know part %r" % name) - return res - - def __eq__(self, other): - """ return true if path and rev attributes each match """ - return (str(self) == str(other) and - (self.rev == other.rev or self.rev == other.rev)) - - def __ne__(self, other): - return not self == other - - def join(self, *args): - """ return a new Path (with the same revision) which is composed - of the self Path followed by 'args' path components. - """ - if not args: - return self - - args = tuple([arg.strip(self.sep) for arg in args]) - parts = (self.strpath, ) + args - newpath = self.__class__(self.sep.join(parts), self.rev, self.auth) - return newpath - - def propget(self, name): - """ return the content of the given property. """ - value = self._propget(name) - return value - - def proplist(self): - """ list all property names. """ - content = self._proplist() - return content - - def size(self): - """ Return the size of the file content of the Path. """ - return self.info().size - - def mtime(self): - """ Return the last modification time of the file. 
""" - return self.info().mtime - - # shared help methods - - def _escape(self, cmd): - return _escape_helper(cmd) - - - #def _childmaxrev(self): - # """ return maximum revision number of childs (or self.rev if no childs) """ - # rev = self.rev - # for name, info in self._listdir_nameinfo(): - # rev = max(rev, info.created_rev) - # return rev - - #def _getlatestrevision(self): - # """ return latest repo-revision for this path. """ - # url = self.strpath - # path = self.__class__(url, None) - # - # # we need a long walk to find the root-repo and revision - # while 1: - # try: - # rev = max(rev, path._childmaxrev()) - # previous = path - # path = path.dirpath() - # except (IOError, process.cmdexec.Error): - # break - # if rev is None: - # raise IOError, "could not determine newest repo revision for %s" % self - # return rev - - class Checkers(common.Checkers): - def dir(self): - try: - return self.path.info().kind == 'dir' - except py.error.Error: - return self._listdirworks() - - def _listdirworks(self): - try: - self.path.listdir() - except py.error.ENOENT: - return False - else: - return True - - def file(self): - try: - return self.path.info().kind == 'file' - except py.error.ENOENT: - return False - - def exists(self): - try: - return self.path.info() - except py.error.ENOENT: - return self._listdirworks() - -def parse_apr_time(timestr): - i = timestr.rfind('.') - if i == -1: - raise ValueError("could not parse %s" % timestr) - timestr = timestr[:i] - parsedtime = time.strptime(timestr, "%Y-%m-%dT%H:%M:%S") - return time.mktime(parsedtime) - -class PropListDict(dict): - """ a Dictionary which fetches values (InfoSvnCommand instances) lazily""" - def __init__(self, path, keynames): - dict.__init__(self, [(x, None) for x in keynames]) - self.path = path - - def __getitem__(self, key): - value = dict.__getitem__(self, key) - if value is None: - value = self.path.propget(key) - dict.__setitem__(self, key, value) - return value - -def fixlocale(): - if sys.platform != 
'win32': - return 'LC_ALL=C ' - return '' - -# some nasty chunk of code to solve path and url conversion and quoting issues -ILLEGAL_CHARS = '* | \ / : < > ? \t \n \x0b \x0c \r'.split(' ') -if os.sep in ILLEGAL_CHARS: - ILLEGAL_CHARS.remove(os.sep) -ISWINDOWS = sys.platform == 'win32' -_reg_allow_disk = re.compile(r'^([a-z]\:\\)?[^:]+$', re.I) -def _check_path(path): - illegal = ILLEGAL_CHARS[:] - sp = path.strpath - if ISWINDOWS: - illegal.remove(':') - if not _reg_allow_disk.match(sp): - raise ValueError('path may not contain a colon (:)') - for char in sp: - if char not in string.printable or char in illegal: - raise ValueError('illegal character %r in path' % (char,)) - -def path_to_fspath(path, addat=True): - _check_path(path) - sp = path.strpath - if addat and path.rev != -1: - sp = '%s@%s' % (sp, path.rev) - elif addat: - sp = '%s@HEAD' % (sp,) - return sp - -def url_from_path(path): - fspath = path_to_fspath(path, False) - quote = py.std.urllib.quote - if ISWINDOWS: - match = _reg_allow_disk.match(fspath) - fspath = fspath.replace('\\', '/') - if match.group(1): - fspath = '/%s%s' % (match.group(1).replace('\\', '/'), - quote(fspath[len(match.group(1)):])) - else: - fspath = quote(fspath) - else: - fspath = quote(fspath) - if path.rev != -1: - fspath = '%s@%s' % (fspath, path.rev) - else: - fspath = '%s@HEAD' % (fspath,) - return 'file://%s' % (fspath,) - -class SvnAuth(object): - """ container for auth information for Subversion """ - def __init__(self, username, password, cache_auth=True, interactive=True): - self.username = username - self.password = password - self.cache_auth = cache_auth - self.interactive = interactive - - def makecmdoptions(self): - uname = self.username.replace('"', '\\"') - passwd = self.password.replace('"', '\\"') - ret = [] - if uname: - ret.append('--username="%s"' % (uname,)) - if passwd: - ret.append('--password="%s"' % (passwd,)) - if not self.cache_auth: - ret.append('--no-auth-cache') - if not self.interactive: - 
ret.append('--non-interactive') - return ' '.join(ret) - - def __str__(self): - return "" %(self.username,) - -rex_blame = re.compile(r'\s*(\d+)\s*(\S+) (.*)') - -class SvnWCCommandPath(common.PathBase): - """ path implementation offering access/modification to svn working copies. - It has methods similar to the functions in os.path and similar to the - commands of the svn client. - """ - sep = os.sep - - def __new__(cls, wcpath=None, auth=None): - self = object.__new__(cls) - if isinstance(wcpath, cls): - if wcpath.__class__ == cls: - return wcpath - wcpath = wcpath.localpath - if _check_for_bad_chars(str(wcpath), - ALLOWED_CHARS): - raise ValueError("bad char in wcpath %s" % (wcpath, )) - self.localpath = py.path.local(wcpath) - self.auth = auth - return self - - strpath = property(lambda x: str(x.localpath), None, None, "string path") - rev = property(lambda x: x.info(usecache=0).rev, None, None, "revision") - - def __eq__(self, other): - return self.localpath == getattr(other, 'localpath', None) - - def _geturl(self): - if getattr(self, '_url', None) is None: - info = self.info() - self._url = info.url #SvnPath(info.url, info.rev) - assert isinstance(self._url, py.builtin._basestring) - return self._url - - url = property(_geturl, None, None, "url of this WC item") - - def _escape(self, cmd): - return _escape_helper(cmd) - - def dump(self, obj): - """ pickle object into path location""" - return self.localpath.dump(obj) - - def svnurl(self): - """ return current SvnPath for this WC-item. 
""" - info = self.info() - return py.path.svnurl(info.url) - - def __repr__(self): - return "svnwc(%r)" % (self.strpath) # , self._url) - - def __str__(self): - return str(self.localpath) - - def _makeauthoptions(self): - if self.auth is None: - return '' - return self.auth.makecmdoptions() - - def _authsvn(self, cmd, args=None): - args = args and list(args) or [] - args.append(self._makeauthoptions()) - return self._svn(cmd, *args) - - def _svn(self, cmd, *args): - l = ['svn %s' % cmd] - args = [self._escape(item) for item in args] - l.extend(args) - l.append('"%s"' % self._escape(self.strpath)) - # try fixing the locale because we can't otherwise parse - string = fixlocale() + " ".join(l) - try: - try: - key = 'LC_MESSAGES' - hold = os.environ.get(key) - os.environ[key] = 'C' - out = py.process.cmdexec(string) - finally: - if hold: - os.environ[key] = hold - else: - del os.environ[key] - except py.process.cmdexec.Error: - e = sys.exc_info()[1] - strerr = e.err.lower() - if strerr.find('not found') != -1: - raise py.error.ENOENT(self) - elif strerr.find("E200009:") != -1: - raise py.error.ENOENT(self) - if (strerr.find('file exists') != -1 or - strerr.find('file already exists') != -1 or - strerr.find('w150002:') != -1 or - strerr.find("can't create directory") != -1): - raise py.error.EEXIST(strerr) #self) - raise - return out - - def switch(self, url): - """ switch to given URL. """ - self._authsvn('switch', [url]) - - def checkout(self, url=None, rev=None): - """ checkout from url to local wcpath. """ - args = [] - if url is None: - url = self.url - if rev is None or rev == -1: - if (py.std.sys.platform != 'win32' and - _getsvnversion() == '1.3'): - url += "@HEAD" - else: - if _getsvnversion() == '1.3': - url += "@%d" % rev - else: - args.append('-r' + str(rev)) - args.append(url) - self._authsvn('co', args) - - def update(self, rev='HEAD', interactive=True): - """ update working copy item to given revision. (None -> HEAD). 
""" - opts = ['-r', rev] - if not interactive: - opts.append("--non-interactive") - self._authsvn('up', opts) - - def write(self, content, mode='w'): - """ write content into local filesystem wc. """ - self.localpath.write(content, mode) - - def dirpath(self, *args): - """ return the directory Path of the current Path. """ - return self.__class__(self.localpath.dirpath(*args), auth=self.auth) - - def _ensuredirs(self): - parent = self.dirpath() - if parent.check(dir=0): - parent._ensuredirs() - if self.check(dir=0): - self.mkdir() - return self - - def ensure(self, *args, **kwargs): - """ ensure that an args-joined path exists (by default as - a file). if you specify a keyword argument 'directory=True' - then the path is forced to be a directory path. - """ - p = self.join(*args) - if p.check(): - if p.check(versioned=False): - p.add() - return p - if kwargs.get('dir', 0): - return p._ensuredirs() - parent = p.dirpath() - parent._ensuredirs() - p.write("") - p.add() - return p - - def mkdir(self, *args): - """ create & return the directory joined with args. """ - if args: - return self.join(*args).mkdir() - else: - self._svn('mkdir') - return self - - def add(self): - """ add ourself to svn """ - self._svn('add') - - def remove(self, rec=1, force=1): - """ remove a file or a directory tree. 'rec'ursive is - ignored and considered always true (because of - underlying svn semantics. - """ - assert rec, "svn cannot remove non-recursively" - if not self.check(versioned=True): - # not added to svn (anymore?), just remove - py.path.local(self).remove() - return - flags = [] - if force: - flags.append('--force') - self._svn('remove', *flags) - - def copy(self, target): - """ copy path to target.""" - py.process.cmdexec("svn copy %s %s" %(str(self), str(target))) - - def rename(self, target): - """ rename this path to target. 
""" - py.process.cmdexec("svn move --force %s %s" %(str(self), str(target))) - - def lock(self): - """ set a lock (exclusive) on the resource """ - out = self._authsvn('lock').strip() - if not out: - # warning or error, raise exception - raise ValueError("unknown error in svn lock command") - - def unlock(self): - """ unset a previously set lock """ - out = self._authsvn('unlock').strip() - if out.startswith('svn:'): - # warning or error, raise exception - raise Exception(out[4:]) - - def cleanup(self): - """ remove any locks from the resource """ - # XXX should be fixed properly!!! - try: - self.unlock() - except: - pass - - def status(self, updates=0, rec=0, externals=0): - """ return (collective) Status object for this file. """ - # http://svnbook.red-bean.com/book.html#svn-ch-3-sect-4.3.1 - # 2201 2192 jum test - # XXX - if externals: - raise ValueError("XXX cannot perform status() " - "on external items yet") - else: - #1.2 supports: externals = '--ignore-externals' - externals = '' - if rec: - rec= '' - else: - rec = '--non-recursive' - - # XXX does not work on all subversion versions - #if not externals: - # externals = '--ignore-externals' - - if updates: - updates = '-u' - else: - updates = '' - - try: - cmd = 'status -v --xml --no-ignore %s %s %s' % ( - updates, rec, externals) - out = self._authsvn(cmd) - except py.process.cmdexec.Error: - cmd = 'status -v --no-ignore %s %s %s' % ( - updates, rec, externals) - out = self._authsvn(cmd) - rootstatus = WCStatus(self).fromstring(out, self) - else: - rootstatus = XMLWCStatus(self).fromstring(out, self) - return rootstatus - - def diff(self, rev=None): - """ return a diff of the current path against revision rev (defaulting - to the last one). 
- """ - args = [] - if rev is not None: - args.append("-r %d" % rev) - out = self._authsvn('diff', args) - return out - - def blame(self): - """ return a list of tuples of three elements: - (revision, commiter, line) - """ - out = self._svn('blame') - result = [] - blamelines = out.splitlines() - reallines = py.path.svnurl(self.url).readlines() - for i, (blameline, line) in enumerate( - zip(blamelines, reallines)): - m = rex_blame.match(blameline) - if not m: - raise ValueError("output line %r of svn blame does not match " - "expected format" % (line, )) - rev, name, _ = m.groups() - result.append((int(rev), name, line)) - return result - - _rex_commit = re.compile(r'.*Committed revision (\d+)\.$', re.DOTALL) - def commit(self, msg='', rec=1): - """ commit with support for non-recursive commits """ - # XXX i guess escaping should be done better here?!? - cmd = 'commit -m "%s" --force-log' % (msg.replace('"', '\\"'),) - if not rec: - cmd += ' -N' - out = self._authsvn(cmd) - try: - del cache.info[self] - except KeyError: - pass - if out: - m = self._rex_commit.match(out) - return int(m.group(1)) - - def propset(self, name, value, *args): - """ set property name to value on this path. """ - d = py.path.local.mkdtemp() - try: - p = d.join('value') - p.write(value) - self._svn('propset', name, '--file', str(p), *args) - finally: - d.remove() - - def propget(self, name): - """ get property name on this path. """ - res = self._svn('propget', name) - return res[:-1] # strip trailing newline - - def propdel(self, name): - """ delete property name on this path. """ - res = self._svn('propdel', name) - return res[:-1] # strip trailing newline - - def proplist(self, rec=0): - """ return a mapping of property names to property values. -If rec is True, then return a dictionary mapping sub-paths to such mappings. 
-""" - if rec: - res = self._svn('proplist -R') - return make_recursive_propdict(self, res) - else: - res = self._svn('proplist') - lines = res.split('\n') - lines = [x.strip() for x in lines[1:]] - return PropListDict(self, lines) - - def revert(self, rec=0): - """ revert the local changes of this path. if rec is True, do so -recursively. """ - if rec: - result = self._svn('revert -R') - else: - result = self._svn('revert') - return result - - def new(self, **kw): - """ create a modified version of this path. A 'rev' argument - indicates a new revision. - the following keyword arguments modify various path parts: - - http://host.com/repo/path/file.ext - |-----------------------| dirname - |------| basename - |--| purebasename - |--| ext - """ - if kw: - localpath = self.localpath.new(**kw) - else: - localpath = self.localpath - return self.__class__(localpath, auth=self.auth) - - def join(self, *args, **kwargs): - """ return a new Path (with the same revision) which is composed - of the self Path followed by 'args' path components. - """ - if not args: - return self - localpath = self.localpath.join(*args, **kwargs) - return self.__class__(localpath, auth=self.auth) - - def info(self, usecache=1): - """ return an Info structure with svn-provided information. """ - info = usecache and cache.info.get(self) - if not info: - try: - output = self._svn('info') - except py.process.cmdexec.Error: - e = sys.exc_info()[1] - if e.err.find('Path is not a working copy directory') != -1: - raise py.error.ENOENT(self, e.err) - elif e.err.find("is not under version control") != -1: - raise py.error.ENOENT(self, e.err) - raise - # XXX SVN 1.3 has output on stderr instead of stdout (while it does - # return 0!), so a bit nasty, but we assume no output is output - # to stderr... 
- if (output.strip() == '' or - output.lower().find('not a versioned resource') != -1): - raise py.error.ENOENT(self, output) - info = InfoSvnWCCommand(output) - - # Can't reliably compare on Windows without access to win32api - if py.std.sys.platform != 'win32': - if info.path != self.localpath: - raise py.error.ENOENT(self, "not a versioned resource:" + - " %s != %s" % (info.path, self.localpath)) - cache.info[self] = info - return info - - def listdir(self, fil=None, sort=None): - """ return a sequence of Paths. - - listdir will return either a tuple or a list of paths - depending on implementation choices. - """ - if isinstance(fil, str): - fil = common.FNMatcher(fil) - # XXX unify argument naming with LocalPath.listdir - def notsvn(path): - return path.basename != '.svn' - - paths = [] - for localpath in self.localpath.listdir(notsvn): - p = self.__class__(localpath, auth=self.auth) - if notsvn(p) and (not fil or fil(p)): - paths.append(p) - self._sortlist(paths, sort) - return paths - - def open(self, mode='r'): - """ return an opened file with the given mode. """ - return open(self.strpath, mode) - - def _getbyspec(self, spec): - return self.localpath._getbyspec(spec) - - class Checkers(py.path.local.Checkers): - def __init__(self, path): - self.svnwcpath = path - self.path = path.localpath - def versioned(self): - try: - s = self.svnwcpath.info() - except (py.error.ENOENT, py.error.EEXIST): - return False - except py.process.cmdexec.Error: - e = sys.exc_info()[1] - if e.err.find('is not a working copy')!=-1: - return False - if e.err.lower().find('not a versioned resource') != -1: - return False - raise - else: - return True - - def log(self, rev_start=None, rev_end=1, verbose=False): - """ return a list of LogEntry instances for this path. -rev_start is the starting revision (defaulting to the first one). -rev_end is the last revision (defaulting to HEAD). -if verbose is True, then the LogEntry instances also know which files changed. 
-""" - assert self.check() # make it simpler for the pipe - rev_start = rev_start is None and "HEAD" or rev_start - rev_end = rev_end is None and "HEAD" or rev_end - if rev_start == "HEAD" and rev_end == 1: - rev_opt = "" - else: - rev_opt = "-r %s:%s" % (rev_start, rev_end) - verbose_opt = verbose and "-v" or "" - locale_env = fixlocale() - # some blather on stderr - auth_opt = self._makeauthoptions() - #stdin, stdout, stderr = os.popen3(locale_env + - # 'svn log --xml %s %s %s "%s"' % ( - # rev_opt, verbose_opt, auth_opt, - # self.strpath)) - cmd = locale_env + 'svn log --xml %s %s %s "%s"' % ( - rev_opt, verbose_opt, auth_opt, self.strpath) - - popen = subprocess.Popen(cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - shell=True, - ) - stdout, stderr = popen.communicate() - stdout = py.builtin._totext(stdout, sys.getdefaultencoding()) - minidom,ExpatError = importxml() - try: - tree = minidom.parseString(stdout) - except ExpatError: - raise ValueError('no such revision') - result = [] - for logentry in filter(None, tree.firstChild.childNodes): - if logentry.nodeType == logentry.ELEMENT_NODE: - result.append(LogEntry(logentry)) - return result - - def size(self): - """ Return the size of the file content of the Path. """ - return self.info().size - - def mtime(self): - """ Return the last modification time of the file. 
""" - return self.info().mtime - - def __hash__(self): - return hash((self.strpath, self.__class__, self.auth)) - - -class WCStatus: - attrnames = ('modified','added', 'conflict', 'unchanged', 'external', - 'deleted', 'prop_modified', 'unknown', 'update_available', - 'incomplete', 'kindmismatch', 'ignored', 'locked', 'replaced' - ) - - def __init__(self, wcpath, rev=None, modrev=None, author=None): - self.wcpath = wcpath - self.rev = rev - self.modrev = modrev - self.author = author - - for name in self.attrnames: - setattr(self, name, []) - - def allpath(self, sort=True, **kw): - d = {} - for name in self.attrnames: - if name not in kw or kw[name]: - for path in getattr(self, name): - d[path] = 1 - l = d.keys() - if sort: - l.sort() - return l - - # XXX a bit scary to assume there's always 2 spaces between username and - # path, however with win32 allowing spaces in user names there doesn't - # seem to be a more solid approach :( - _rex_status = re.compile(r'\s+(\d+|-)\s+(\S+)\s+(.+?)\s{2,}(.*)') - - def fromstring(data, rootwcpath, rev=None, modrev=None, author=None): - """ return a new WCStatus object from data 's' - """ - rootstatus = WCStatus(rootwcpath, rev, modrev, author) - update_rev = None - for line in data.split('\n'): - if not line.strip(): - continue - #print "processing %r" % line - flags, rest = line[:8], line[8:] - # first column - c0,c1,c2,c3,c4,c5,x6,c7 = flags - #if '*' in line: - # print "flags", repr(flags), "rest", repr(rest) - - if c0 in '?XI': - fn = line.split(None, 1)[1] - if c0 == '?': - wcpath = rootwcpath.join(fn, abs=1) - rootstatus.unknown.append(wcpath) - elif c0 == 'X': - wcpath = rootwcpath.__class__( - rootwcpath.localpath.join(fn, abs=1), - auth=rootwcpath.auth) - rootstatus.external.append(wcpath) - elif c0 == 'I': - wcpath = rootwcpath.join(fn, abs=1) - rootstatus.ignored.append(wcpath) - - continue - - #elif c0 in '~!' 
or c4 == 'S': - # raise NotImplementedError("received flag %r" % c0) - - m = WCStatus._rex_status.match(rest) - if not m: - if c7 == '*': - fn = rest.strip() - wcpath = rootwcpath.join(fn, abs=1) - rootstatus.update_available.append(wcpath) - continue - if line.lower().find('against revision:')!=-1: - update_rev = int(rest.split(':')[1].strip()) - continue - if line.lower().find('status on external') > -1: - # XXX not sure what to do here... perhaps we want to - # store some state instead of just continuing, as right - # now it makes the top-level external get added twice - # (once as external, once as 'normal' unchanged item) - # because of the way SVN presents external items - continue - # keep trying - raise ValueError("could not parse line %r" % line) - else: - rev, modrev, author, fn = m.groups() - wcpath = rootwcpath.join(fn, abs=1) - #assert wcpath.check() - if c0 == 'M': - assert wcpath.check(file=1), "didn't expect a directory with changed content here" - rootstatus.modified.append(wcpath) - elif c0 == 'A' or c3 == '+' : - rootstatus.added.append(wcpath) - elif c0 == 'D': - rootstatus.deleted.append(wcpath) - elif c0 == 'C': - rootstatus.conflict.append(wcpath) - elif c0 == '~': - rootstatus.kindmismatch.append(wcpath) - elif c0 == '!': - rootstatus.incomplete.append(wcpath) - elif c0 == 'R': - rootstatus.replaced.append(wcpath) - elif not c0.strip(): - rootstatus.unchanged.append(wcpath) - else: - raise NotImplementedError("received flag %r" % c0) - - if c1 == 'M': - rootstatus.prop_modified.append(wcpath) - # XXX do we cover all client versions here? 
- if c2 == 'L' or c5 == 'K': - rootstatus.locked.append(wcpath) - if c7 == '*': - rootstatus.update_available.append(wcpath) - - if wcpath == rootwcpath: - rootstatus.rev = rev - rootstatus.modrev = modrev - rootstatus.author = author - if update_rev: - rootstatus.update_rev = update_rev - continue - return rootstatus - fromstring = staticmethod(fromstring) - -class XMLWCStatus(WCStatus): - def fromstring(data, rootwcpath, rev=None, modrev=None, author=None): - """ parse 'data' (XML string as outputted by svn st) into a status obj - """ - # XXX for externals, the path is shown twice: once - # with external information, and once with full info as if - # the item was a normal non-external... the current way of - # dealing with this issue is by ignoring it - this does make - # externals appear as external items as well as 'normal', - # unchanged ones in the status object so this is far from ideal - rootstatus = WCStatus(rootwcpath, rev, modrev, author) - update_rev = None - minidom, ExpatError = importxml() - try: - doc = minidom.parseString(data) - except ExpatError: - e = sys.exc_info()[1] - raise ValueError(str(e)) - urevels = doc.getElementsByTagName('against') - if urevels: - rootstatus.update_rev = urevels[-1].getAttribute('revision') - for entryel in doc.getElementsByTagName('entry'): - path = entryel.getAttribute('path') - statusel = entryel.getElementsByTagName('wc-status')[0] - itemstatus = statusel.getAttribute('item') - - if itemstatus == 'unversioned': - wcpath = rootwcpath.join(path, abs=1) - rootstatus.unknown.append(wcpath) - continue - elif itemstatus == 'external': - wcpath = rootwcpath.__class__( - rootwcpath.localpath.join(path, abs=1), - auth=rootwcpath.auth) - rootstatus.external.append(wcpath) - continue - elif itemstatus == 'ignored': - wcpath = rootwcpath.join(path, abs=1) - rootstatus.ignored.append(wcpath) - continue - elif itemstatus == 'incomplete': - wcpath = rootwcpath.join(path, abs=1) - rootstatus.incomplete.append(wcpath) - continue - 
- rev = statusel.getAttribute('revision') - if itemstatus == 'added' or itemstatus == 'none': - rev = '0' - modrev = '?' - author = '?' - date = '' - elif itemstatus == "replaced": - pass - else: - #print entryel.toxml() - commitel = entryel.getElementsByTagName('commit')[0] - if commitel: - modrev = commitel.getAttribute('revision') - author = '' - author_els = commitel.getElementsByTagName('author') - if author_els: - for c in author_els[0].childNodes: - author += c.nodeValue - date = '' - for c in commitel.getElementsByTagName('date')[0]\ - .childNodes: - date += c.nodeValue - - wcpath = rootwcpath.join(path, abs=1) - - assert itemstatus != 'modified' or wcpath.check(file=1), ( - 'did\'t expect a directory with changed content here') - - itemattrname = { - 'normal': 'unchanged', - 'unversioned': 'unknown', - 'conflicted': 'conflict', - 'none': 'added', - }.get(itemstatus, itemstatus) - - attr = getattr(rootstatus, itemattrname) - attr.append(wcpath) - - propsstatus = statusel.getAttribute('props') - if propsstatus not in ('none', 'normal'): - rootstatus.prop_modified.append(wcpath) - - if wcpath == rootwcpath: - rootstatus.rev = rev - rootstatus.modrev = modrev - rootstatus.author = author - rootstatus.date = date - - # handle repos-status element (remote info) - rstatusels = entryel.getElementsByTagName('repos-status') - if rstatusels: - rstatusel = rstatusels[0] - ritemstatus = rstatusel.getAttribute('item') - if ritemstatus in ('added', 'modified'): - rootstatus.update_available.append(wcpath) - - lockels = entryel.getElementsByTagName('lock') - if len(lockels): - rootstatus.locked.append(wcpath) - - return rootstatus - fromstring = staticmethod(fromstring) - -class InfoSvnWCCommand: - def __init__(self, output): - # Path: test - # URL: http://codespeak.net/svn/std.path/trunk/dist/std.path/test - # Repository UUID: fd0d7bf2-dfb6-0310-8d31-b7ecfe96aada - # Revision: 2151 - # Node Kind: directory - # Schedule: normal - # Last Changed Author: hpk - # Last 
Changed Rev: 2100 - # Last Changed Date: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003) - # Properties Last Updated: 2003-11-03 14:47:48 +0100 (Mon, 03 Nov 2003) - - d = {} - for line in output.split('\n'): - if not line.strip(): - continue - key, value = line.split(':', 1) - key = key.lower().replace(' ', '') - value = value.strip() - d[key] = value - try: - self.url = d['url'] - except KeyError: - raise ValueError("Not a versioned resource") - #raise ValueError, "Not a versioned resource %r" % path - self.kind = d['nodekind'] == 'directory' and 'dir' or d['nodekind'] - try: - self.rev = int(d['revision']) - except KeyError: - self.rev = None - - self.path = py.path.local(d['path']) - self.size = self.path.size() - if 'lastchangedrev' in d: - self.created_rev = int(d['lastchangedrev']) - if 'lastchangedauthor' in d: - self.last_author = d['lastchangedauthor'] - if 'lastchangeddate' in d: - self.mtime = parse_wcinfotime(d['lastchangeddate']) - self.time = self.mtime * 1000000 - - def __eq__(self, other): - return self.__dict__ == other.__dict__ - -def parse_wcinfotime(timestr): - """ Returns seconds since epoch, UTC. """ - # example: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003) - m = re.match(r'(\d+-\d+-\d+ \d+:\d+:\d+) ([+-]\d+) .*', timestr) - if not m: - raise ValueError("timestring %r does not match" % timestr) - timestr, timezone = m.groups() - # do not handle timezone specially, return value should be UTC - parsedtime = time.strptime(timestr, "%Y-%m-%d %H:%M:%S") - return calendar.timegm(parsedtime) - -def make_recursive_propdict(wcroot, - output, - rex = re.compile("Properties on '(.*)':")): - """ Return a dictionary of path->PropListDict mappings. 
""" - lines = [x for x in output.split('\n') if x] - pdict = {} - while lines: - line = lines.pop(0) - m = rex.match(line) - if not m: - raise ValueError("could not parse propget-line: %r" % line) - path = m.groups()[0] - wcpath = wcroot.join(path, abs=1) - propnames = [] - while lines and lines[0].startswith(' '): - propname = lines.pop(0).strip() - propnames.append(propname) - assert propnames, "must have found properties!" - pdict[wcpath] = PropListDict(wcpath, propnames) - return pdict - - -def importxml(cache=[]): - if cache: - return cache - from xml.dom import minidom - from xml.parsers.expat import ExpatError - cache.extend([minidom, ExpatError]) - return cache - -class LogEntry: - def __init__(self, logentry): - self.rev = int(logentry.getAttribute('revision')) - for lpart in filter(None, logentry.childNodes): - if lpart.nodeType == lpart.ELEMENT_NODE: - if lpart.nodeName == 'author': - self.author = lpart.firstChild.nodeValue - elif lpart.nodeName == 'msg': - if lpart.firstChild: - self.msg = lpart.firstChild.nodeValue - else: - self.msg = '' - elif lpart.nodeName == 'date': - #2003-07-29T20:05:11.598637Z - timestr = lpart.firstChild.nodeValue - self.date = parse_apr_time(timestr) - elif lpart.nodeName == 'paths': - self.strpaths = [] - for ppart in filter(None, lpart.childNodes): - if ppart.nodeType == ppart.ELEMENT_NODE: - self.strpaths.append(PathEntry(ppart)) - def __repr__(self): - return '' % ( - self.rev, self.author, self.date) - - +""" +svn-Command based Implementation of a Subversion WorkingCopy Path. + + SvnWCCommandPath is the main class. 
+ +""" + +import os, sys, time, re, calendar +import py +import subprocess +from py._path import common + +#----------------------------------------------------------- +# Caching latest repository revision and repo-paths +# (getting them is slow with the current implementations) +# +# XXX make mt-safe +#----------------------------------------------------------- + +class cache: + proplist = {} + info = {} + entries = {} + prop = {} + +class RepoEntry: + def __init__(self, url, rev, timestamp): + self.url = url + self.rev = rev + self.timestamp = timestamp + + def __str__(self): + return "repo: %s;%s %s" %(self.url, self.rev, self.timestamp) + +class RepoCache: + """ The Repocache manages discovered repository paths + and their revisions. If inside a timeout the cache + will even return the revision of the root. + """ + timeout = 20 # seconds after which we forget that we know the last revision + + def __init__(self): + self.repos = [] + + def clear(self): + self.repos = [] + + def put(self, url, rev, timestamp=None): + if rev is None: + return + if timestamp is None: + timestamp = time.time() + + for entry in self.repos: + if url == entry.url: + entry.timestamp = timestamp + entry.rev = rev + #print "set repo", entry + break + else: + entry = RepoEntry(url, rev, timestamp) + self.repos.append(entry) + #print "appended repo", entry + + def get(self, url): + now = time.time() + for entry in self.repos: + if url.startswith(entry.url): + if now < entry.timestamp + self.timeout: + #print "returning immediate Etrny", entry + return entry.url, entry.rev + return entry.url, -1 + return url, -1 + +repositories = RepoCache() + + +# svn support code + +ALLOWED_CHARS = "_ -/\\=$.~+%" #add characters as necessary when tested +if sys.platform == "win32": + ALLOWED_CHARS += ":" +ALLOWED_CHARS_HOST = ALLOWED_CHARS + '@:' + +def _getsvnversion(ver=[]): + try: + return ver[0] + except IndexError: + v = py.process.cmdexec("svn -q --version") + v.strip() + v = 
'.'.join(v.split('.')[:2]) + ver.append(v) + return v + +def _escape_helper(text): + text = str(text) + if py.std.sys.platform != 'win32': + text = str(text).replace('$', '\\$') + return text + +def _check_for_bad_chars(text, allowed_chars=ALLOWED_CHARS): + for c in str(text): + if c.isalnum(): + continue + if c in allowed_chars: + continue + return True + return False + +def checkbadchars(url): + # (hpk) not quite sure about the exact purpose, guido w.? + proto, uri = url.split("://", 1) + if proto != "file": + host, uripath = uri.split('/', 1) + # only check for bad chars in the non-protocol parts + if (_check_for_bad_chars(host, ALLOWED_CHARS_HOST) \ + or _check_for_bad_chars(uripath, ALLOWED_CHARS)): + raise ValueError("bad char in %r" % (url, )) + + +#_______________________________________________________________ + +class SvnPathBase(common.PathBase): + """ Base implementation for SvnPath implementations. """ + sep = '/' + + def _geturl(self): + return self.strpath + url = property(_geturl, None, None, "url of this svn-path.") + + def __str__(self): + """ return a string representation (including rev-number) """ + return self.strpath + + def __hash__(self): + return hash(self.strpath) + + def new(self, **kw): + """ create a modified version of this path. A 'rev' argument + indicates a new revision. + the following keyword arguments modify various path parts:: + + http://host.com/repo/path/file.ext + |-----------------------| dirname + |------| basename + |--| purebasename + |--| ext + """ + obj = object.__new__(self.__class__) + obj.rev = kw.get('rev', self.rev) + obj.auth = kw.get('auth', self.auth) + dirname, basename, purebasename, ext = self._getbyspec( + "dirname,basename,purebasename,ext") + if 'basename' in kw: + if 'purebasename' in kw or 'ext' in kw: + raise ValueError("invalid specification %r" % kw) + else: + pb = kw.setdefault('purebasename', purebasename) + ext = kw.setdefault('ext', ext) + if ext and not ext.startswith('.'): + ext = '.' 
+ ext + kw['basename'] = pb + ext + + kw.setdefault('dirname', dirname) + kw.setdefault('sep', self.sep) + if kw['basename']: + obj.strpath = "%(dirname)s%(sep)s%(basename)s" % kw + else: + obj.strpath = "%(dirname)s" % kw + return obj + + def _getbyspec(self, spec): + """ get specified parts of the path. 'arg' is a string + with comma separated path parts. The parts are returned + in exactly the order of the specification. + + you may specify the following parts: + + http://host.com/repo/path/file.ext + |-----------------------| dirname + |------| basename + |--| purebasename + |--| ext + """ + res = [] + parts = self.strpath.split(self.sep) + for name in spec.split(','): + name = name.strip() + if name == 'dirname': + res.append(self.sep.join(parts[:-1])) + elif name == 'basename': + res.append(parts[-1]) + else: + basename = parts[-1] + i = basename.rfind('.') + if i == -1: + purebasename, ext = basename, '' + else: + purebasename, ext = basename[:i], basename[i:] + if name == 'purebasename': + res.append(purebasename) + elif name == 'ext': + res.append(ext) + else: + raise NameError("Don't know part %r" % name) + return res + + def __eq__(self, other): + """ return true if path and rev attributes each match """ + return (str(self) == str(other) and + (self.rev == other.rev or self.rev == other.rev)) + + def __ne__(self, other): + return not self == other + + def join(self, *args): + """ return a new Path (with the same revision) which is composed + of the self Path followed by 'args' path components. + """ + if not args: + return self + + args = tuple([arg.strip(self.sep) for arg in args]) + parts = (self.strpath, ) + args + newpath = self.__class__(self.sep.join(parts), self.rev, self.auth) + return newpath + + def propget(self, name): + """ return the content of the given property. """ + value = self._propget(name) + return value + + def proplist(self): + """ list all property names. 
""" + content = self._proplist() + return content + + def size(self): + """ Return the size of the file content of the Path. """ + return self.info().size + + def mtime(self): + """ Return the last modification time of the file. """ + return self.info().mtime + + # shared help methods + + def _escape(self, cmd): + return _escape_helper(cmd) + + + #def _childmaxrev(self): + # """ return maximum revision number of childs (or self.rev if no childs) """ + # rev = self.rev + # for name, info in self._listdir_nameinfo(): + # rev = max(rev, info.created_rev) + # return rev + + #def _getlatestrevision(self): + # """ return latest repo-revision for this path. """ + # url = self.strpath + # path = self.__class__(url, None) + # + # # we need a long walk to find the root-repo and revision + # while 1: + # try: + # rev = max(rev, path._childmaxrev()) + # previous = path + # path = path.dirpath() + # except (IOError, process.cmdexec.Error): + # break + # if rev is None: + # raise IOError, "could not determine newest repo revision for %s" % self + # return rev + + class Checkers(common.Checkers): + def dir(self): + try: + return self.path.info().kind == 'dir' + except py.error.Error: + return self._listdirworks() + + def _listdirworks(self): + try: + self.path.listdir() + except py.error.ENOENT: + return False + else: + return True + + def file(self): + try: + return self.path.info().kind == 'file' + except py.error.ENOENT: + return False + + def exists(self): + try: + return self.path.info() + except py.error.ENOENT: + return self._listdirworks() + +def parse_apr_time(timestr): + i = timestr.rfind('.') + if i == -1: + raise ValueError("could not parse %s" % timestr) + timestr = timestr[:i] + parsedtime = time.strptime(timestr, "%Y-%m-%dT%H:%M:%S") + return time.mktime(parsedtime) + +class PropListDict(dict): + """ a Dictionary which fetches values (InfoSvnCommand instances) lazily""" + def __init__(self, path, keynames): + dict.__init__(self, [(x, None) for x in keynames]) + 
self.path = path + + def __getitem__(self, key): + value = dict.__getitem__(self, key) + if value is None: + value = self.path.propget(key) + dict.__setitem__(self, key, value) + return value + +def fixlocale(): + if sys.platform != 'win32': + return 'LC_ALL=C ' + return '' + +# some nasty chunk of code to solve path and url conversion and quoting issues +ILLEGAL_CHARS = '* | \\ / : < > ? \t \n \x0b \x0c \r'.split(' ') +if os.sep in ILLEGAL_CHARS: + ILLEGAL_CHARS.remove(os.sep) +ISWINDOWS = sys.platform == 'win32' +_reg_allow_disk = re.compile(r'^([a-z]\:\\)?[^:]+$', re.I) +def _check_path(path): + illegal = ILLEGAL_CHARS[:] + sp = path.strpath + if ISWINDOWS: + illegal.remove(':') + if not _reg_allow_disk.match(sp): + raise ValueError('path may not contain a colon (:)') + for char in sp: + if char not in string.printable or char in illegal: + raise ValueError('illegal character %r in path' % (char,)) + +def path_to_fspath(path, addat=True): + _check_path(path) + sp = path.strpath + if addat and path.rev != -1: + sp = '%s@%s' % (sp, path.rev) + elif addat: + sp = '%s@HEAD' % (sp,) + return sp + +def url_from_path(path): + fspath = path_to_fspath(path, False) + quote = py.std.urllib.quote + if ISWINDOWS: + match = _reg_allow_disk.match(fspath) + fspath = fspath.replace('\\', '/') + if match.group(1): + fspath = '/%s%s' % (match.group(1).replace('\\', '/'), + quote(fspath[len(match.group(1)):])) + else: + fspath = quote(fspath) + else: + fspath = quote(fspath) + if path.rev != -1: + fspath = '%s@%s' % (fspath, path.rev) + else: + fspath = '%s@HEAD' % (fspath,) + return 'file://%s' % (fspath,) + +class SvnAuth(object): + """ container for auth information for Subversion """ + def __init__(self, username, password, cache_auth=True, interactive=True): + self.username = username + self.password = password + self.cache_auth = cache_auth + self.interactive = interactive + + def makecmdoptions(self): + uname = self.username.replace('"', '\\"') + passwd = 
self.password.replace('"', '\\"') + ret = [] + if uname: + ret.append('--username="%s"' % (uname,)) + if passwd: + ret.append('--password="%s"' % (passwd,)) + if not self.cache_auth: + ret.append('--no-auth-cache') + if not self.interactive: + ret.append('--non-interactive') + return ' '.join(ret) + + def __str__(self): + return "" %(self.username,) + +rex_blame = re.compile(r'\s*(\d+)\s*(\S+) (.*)') + +class SvnWCCommandPath(common.PathBase): + """ path implementation offering access/modification to svn working copies. + It has methods similar to the functions in os.path and similar to the + commands of the svn client. + """ + sep = os.sep + + def __new__(cls, wcpath=None, auth=None): + self = object.__new__(cls) + if isinstance(wcpath, cls): + if wcpath.__class__ == cls: + return wcpath + wcpath = wcpath.localpath + if _check_for_bad_chars(str(wcpath), + ALLOWED_CHARS): + raise ValueError("bad char in wcpath %s" % (wcpath, )) + self.localpath = py.path.local(wcpath) + self.auth = auth + return self + + strpath = property(lambda x: str(x.localpath), None, None, "string path") + rev = property(lambda x: x.info(usecache=0).rev, None, None, "revision") + + def __eq__(self, other): + return self.localpath == getattr(other, 'localpath', None) + + def _geturl(self): + if getattr(self, '_url', None) is None: + info = self.info() + self._url = info.url #SvnPath(info.url, info.rev) + assert isinstance(self._url, py.builtin._basestring) + return self._url + + url = property(_geturl, None, None, "url of this WC item") + + def _escape(self, cmd): + return _escape_helper(cmd) + + def dump(self, obj): + """ pickle object into path location""" + return self.localpath.dump(obj) + + def svnurl(self): + """ return current SvnPath for this WC-item. 
""" + info = self.info() + return py.path.svnurl(info.url) + + def __repr__(self): + return "svnwc(%r)" % (self.strpath) # , self._url) + + def __str__(self): + return str(self.localpath) + + def _makeauthoptions(self): + if self.auth is None: + return '' + return self.auth.makecmdoptions() + + def _authsvn(self, cmd, args=None): + args = args and list(args) or [] + args.append(self._makeauthoptions()) + return self._svn(cmd, *args) + + def _svn(self, cmd, *args): + l = ['svn %s' % cmd] + args = [self._escape(item) for item in args] + l.extend(args) + l.append('"%s"' % self._escape(self.strpath)) + # try fixing the locale because we can't otherwise parse + string = fixlocale() + " ".join(l) + try: + try: + key = 'LC_MESSAGES' + hold = os.environ.get(key) + os.environ[key] = 'C' + out = py.process.cmdexec(string) + finally: + if hold: + os.environ[key] = hold + else: + del os.environ[key] + except py.process.cmdexec.Error: + e = sys.exc_info()[1] + strerr = e.err.lower() + if strerr.find('not found') != -1: + raise py.error.ENOENT(self) + elif strerr.find("E200009:") != -1: + raise py.error.ENOENT(self) + if (strerr.find('file exists') != -1 or + strerr.find('file already exists') != -1 or + strerr.find('w150002:') != -1 or + strerr.find("can't create directory") != -1): + raise py.error.EEXIST(strerr) #self) + raise + return out + + def switch(self, url): + """ switch to given URL. """ + self._authsvn('switch', [url]) + + def checkout(self, url=None, rev=None): + """ checkout from url to local wcpath. """ + args = [] + if url is None: + url = self.url + if rev is None or rev == -1: + if (py.std.sys.platform != 'win32' and + _getsvnversion() == '1.3'): + url += "@HEAD" + else: + if _getsvnversion() == '1.3': + url += "@%d" % rev + else: + args.append('-r' + str(rev)) + args.append(url) + self._authsvn('co', args) + + def update(self, rev='HEAD', interactive=True): + """ update working copy item to given revision. (None -> HEAD). 
""" + opts = ['-r', rev] + if not interactive: + opts.append("--non-interactive") + self._authsvn('up', opts) + + def write(self, content, mode='w'): + """ write content into local filesystem wc. """ + self.localpath.write(content, mode) + + def dirpath(self, *args): + """ return the directory Path of the current Path. """ + return self.__class__(self.localpath.dirpath(*args), auth=self.auth) + + def _ensuredirs(self): + parent = self.dirpath() + if parent.check(dir=0): + parent._ensuredirs() + if self.check(dir=0): + self.mkdir() + return self + + def ensure(self, *args, **kwargs): + """ ensure that an args-joined path exists (by default as + a file). if you specify a keyword argument 'directory=True' + then the path is forced to be a directory path. + """ + p = self.join(*args) + if p.check(): + if p.check(versioned=False): + p.add() + return p + if kwargs.get('dir', 0): + return p._ensuredirs() + parent = p.dirpath() + parent._ensuredirs() + p.write("") + p.add() + return p + + def mkdir(self, *args): + """ create & return the directory joined with args. """ + if args: + return self.join(*args).mkdir() + else: + self._svn('mkdir') + return self + + def add(self): + """ add ourself to svn """ + self._svn('add') + + def remove(self, rec=1, force=1): + """ remove a file or a directory tree. 'rec'ursive is + ignored and considered always true (because of + underlying svn semantics. + """ + assert rec, "svn cannot remove non-recursively" + if not self.check(versioned=True): + # not added to svn (anymore?), just remove + py.path.local(self).remove() + return + flags = [] + if force: + flags.append('--force') + self._svn('remove', *flags) + + def copy(self, target): + """ copy path to target.""" + py.process.cmdexec("svn copy %s %s" %(str(self), str(target))) + + def rename(self, target): + """ rename this path to target. 
""" + py.process.cmdexec("svn move --force %s %s" %(str(self), str(target))) + + def lock(self): + """ set a lock (exclusive) on the resource """ + out = self._authsvn('lock').strip() + if not out: + # warning or error, raise exception + raise ValueError("unknown error in svn lock command") + + def unlock(self): + """ unset a previously set lock """ + out = self._authsvn('unlock').strip() + if out.startswith('svn:'): + # warning or error, raise exception + raise Exception(out[4:]) + + def cleanup(self): + """ remove any locks from the resource """ + # XXX should be fixed properly!!! + try: + self.unlock() + except: + pass + + def status(self, updates=0, rec=0, externals=0): + """ return (collective) Status object for this file. """ + # http://svnbook.red-bean.com/book.html#svn-ch-3-sect-4.3.1 + # 2201 2192 jum test + # XXX + if externals: + raise ValueError("XXX cannot perform status() " + "on external items yet") + else: + #1.2 supports: externals = '--ignore-externals' + externals = '' + if rec: + rec= '' + else: + rec = '--non-recursive' + + # XXX does not work on all subversion versions + #if not externals: + # externals = '--ignore-externals' + + if updates: + updates = '-u' + else: + updates = '' + + try: + cmd = 'status -v --xml --no-ignore %s %s %s' % ( + updates, rec, externals) + out = self._authsvn(cmd) + except py.process.cmdexec.Error: + cmd = 'status -v --no-ignore %s %s %s' % ( + updates, rec, externals) + out = self._authsvn(cmd) + rootstatus = WCStatus(self).fromstring(out, self) + else: + rootstatus = XMLWCStatus(self).fromstring(out, self) + return rootstatus + + def diff(self, rev=None): + """ return a diff of the current path against revision rev (defaulting + to the last one). 
+ """ + args = [] + if rev is not None: + args.append("-r %d" % rev) + out = self._authsvn('diff', args) + return out + + def blame(self): + """ return a list of tuples of three elements: + (revision, commiter, line) + """ + out = self._svn('blame') + result = [] + blamelines = out.splitlines() + reallines = py.path.svnurl(self.url).readlines() + for i, (blameline, line) in enumerate( + zip(blamelines, reallines)): + m = rex_blame.match(blameline) + if not m: + raise ValueError("output line %r of svn blame does not match " + "expected format" % (line, )) + rev, name, _ = m.groups() + result.append((int(rev), name, line)) + return result + + _rex_commit = re.compile(r'.*Committed revision (\d+)\.$', re.DOTALL) + def commit(self, msg='', rec=1): + """ commit with support for non-recursive commits """ + # XXX i guess escaping should be done better here?!? + cmd = 'commit -m "%s" --force-log' % (msg.replace('"', '\\"'),) + if not rec: + cmd += ' -N' + out = self._authsvn(cmd) + try: + del cache.info[self] + except KeyError: + pass + if out: + m = self._rex_commit.match(out) + return int(m.group(1)) + + def propset(self, name, value, *args): + """ set property name to value on this path. """ + d = py.path.local.mkdtemp() + try: + p = d.join('value') + p.write(value) + self._svn('propset', name, '--file', str(p), *args) + finally: + d.remove() + + def propget(self, name): + """ get property name on this path. """ + res = self._svn('propget', name) + return res[:-1] # strip trailing newline + + def propdel(self, name): + """ delete property name on this path. """ + res = self._svn('propdel', name) + return res[:-1] # strip trailing newline + + def proplist(self, rec=0): + """ return a mapping of property names to property values. +If rec is True, then return a dictionary mapping sub-paths to such mappings. 
+""" + if rec: + res = self._svn('proplist -R') + return make_recursive_propdict(self, res) + else: + res = self._svn('proplist') + lines = res.split('\n') + lines = [x.strip() for x in lines[1:]] + return PropListDict(self, lines) + + def revert(self, rec=0): + """ revert the local changes of this path. if rec is True, do so +recursively. """ + if rec: + result = self._svn('revert -R') + else: + result = self._svn('revert') + return result + + def new(self, **kw): + """ create a modified version of this path. A 'rev' argument + indicates a new revision. + the following keyword arguments modify various path parts: + + http://host.com/repo/path/file.ext + |-----------------------| dirname + |------| basename + |--| purebasename + |--| ext + """ + if kw: + localpath = self.localpath.new(**kw) + else: + localpath = self.localpath + return self.__class__(localpath, auth=self.auth) + + def join(self, *args, **kwargs): + """ return a new Path (with the same revision) which is composed + of the self Path followed by 'args' path components. + """ + if not args: + return self + localpath = self.localpath.join(*args, **kwargs) + return self.__class__(localpath, auth=self.auth) + + def info(self, usecache=1): + """ return an Info structure with svn-provided information. """ + info = usecache and cache.info.get(self) + if not info: + try: + output = self._svn('info') + except py.process.cmdexec.Error: + e = sys.exc_info()[1] + if e.err.find('Path is not a working copy directory') != -1: + raise py.error.ENOENT(self, e.err) + elif e.err.find("is not under version control") != -1: + raise py.error.ENOENT(self, e.err) + raise + # XXX SVN 1.3 has output on stderr instead of stdout (while it does + # return 0!), so a bit nasty, but we assume no output is output + # to stderr... 
+ if (output.strip() == '' or + output.lower().find('not a versioned resource') != -1): + raise py.error.ENOENT(self, output) + info = InfoSvnWCCommand(output) + + # Can't reliably compare on Windows without access to win32api + if py.std.sys.platform != 'win32': + if info.path != self.localpath: + raise py.error.ENOENT(self, "not a versioned resource:" + + " %s != %s" % (info.path, self.localpath)) + cache.info[self] = info + return info + + def listdir(self, fil=None, sort=None): + """ return a sequence of Paths. + + listdir will return either a tuple or a list of paths + depending on implementation choices. + """ + if isinstance(fil, str): + fil = common.FNMatcher(fil) + # XXX unify argument naming with LocalPath.listdir + def notsvn(path): + return path.basename != '.svn' + + paths = [] + for localpath in self.localpath.listdir(notsvn): + p = self.__class__(localpath, auth=self.auth) + if notsvn(p) and (not fil or fil(p)): + paths.append(p) + self._sortlist(paths, sort) + return paths + + def open(self, mode='r'): + """ return an opened file with the given mode. """ + return open(self.strpath, mode) + + def _getbyspec(self, spec): + return self.localpath._getbyspec(spec) + + class Checkers(py.path.local.Checkers): + def __init__(self, path): + self.svnwcpath = path + self.path = path.localpath + def versioned(self): + try: + s = self.svnwcpath.info() + except (py.error.ENOENT, py.error.EEXIST): + return False + except py.process.cmdexec.Error: + e = sys.exc_info()[1] + if e.err.find('is not a working copy')!=-1: + return False + if e.err.lower().find('not a versioned resource') != -1: + return False + raise + else: + return True + + def log(self, rev_start=None, rev_end=1, verbose=False): + """ return a list of LogEntry instances for this path. +rev_start is the starting revision (defaulting to the first one). +rev_end is the last revision (defaulting to HEAD). +if verbose is True, then the LogEntry instances also know which files changed. 
+""" + assert self.check() # make it simpler for the pipe + rev_start = rev_start is None and "HEAD" or rev_start + rev_end = rev_end is None and "HEAD" or rev_end + if rev_start == "HEAD" and rev_end == 1: + rev_opt = "" + else: + rev_opt = "-r %s:%s" % (rev_start, rev_end) + verbose_opt = verbose and "-v" or "" + locale_env = fixlocale() + # some blather on stderr + auth_opt = self._makeauthoptions() + #stdin, stdout, stderr = os.popen3(locale_env + + # 'svn log --xml %s %s %s "%s"' % ( + # rev_opt, verbose_opt, auth_opt, + # self.strpath)) + cmd = locale_env + 'svn log --xml %s %s %s "%s"' % ( + rev_opt, verbose_opt, auth_opt, self.strpath) + + popen = subprocess.Popen(cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=True, + ) + stdout, stderr = popen.communicate() + stdout = py.builtin._totext(stdout, sys.getdefaultencoding()) + minidom,ExpatError = importxml() + try: + tree = minidom.parseString(stdout) + except ExpatError: + raise ValueError('no such revision') + result = [] + for logentry in filter(None, tree.firstChild.childNodes): + if logentry.nodeType == logentry.ELEMENT_NODE: + result.append(LogEntry(logentry)) + return result + + def size(self): + """ Return the size of the file content of the Path. """ + return self.info().size + + def mtime(self): + """ Return the last modification time of the file. 
""" + return self.info().mtime + + def __hash__(self): + return hash((self.strpath, self.__class__, self.auth)) + + +class WCStatus: + attrnames = ('modified','added', 'conflict', 'unchanged', 'external', + 'deleted', 'prop_modified', 'unknown', 'update_available', + 'incomplete', 'kindmismatch', 'ignored', 'locked', 'replaced' + ) + + def __init__(self, wcpath, rev=None, modrev=None, author=None): + self.wcpath = wcpath + self.rev = rev + self.modrev = modrev + self.author = author + + for name in self.attrnames: + setattr(self, name, []) + + def allpath(self, sort=True, **kw): + d = {} + for name in self.attrnames: + if name not in kw or kw[name]: + for path in getattr(self, name): + d[path] = 1 + l = d.keys() + if sort: + l.sort() + return l + + # XXX a bit scary to assume there's always 2 spaces between username and + # path, however with win32 allowing spaces in user names there doesn't + # seem to be a more solid approach :( + _rex_status = re.compile(r'\s+(\d+|-)\s+(\S+)\s+(.+?)\s{2,}(.*)') + + def fromstring(data, rootwcpath, rev=None, modrev=None, author=None): + """ return a new WCStatus object from data 's' + """ + rootstatus = WCStatus(rootwcpath, rev, modrev, author) + update_rev = None + for line in data.split('\n'): + if not line.strip(): + continue + #print "processing %r" % line + flags, rest = line[:8], line[8:] + # first column + c0,c1,c2,c3,c4,c5,x6,c7 = flags + #if '*' in line: + # print "flags", repr(flags), "rest", repr(rest) + + if c0 in '?XI': + fn = line.split(None, 1)[1] + if c0 == '?': + wcpath = rootwcpath.join(fn, abs=1) + rootstatus.unknown.append(wcpath) + elif c0 == 'X': + wcpath = rootwcpath.__class__( + rootwcpath.localpath.join(fn, abs=1), + auth=rootwcpath.auth) + rootstatus.external.append(wcpath) + elif c0 == 'I': + wcpath = rootwcpath.join(fn, abs=1) + rootstatus.ignored.append(wcpath) + + continue + + #elif c0 in '~!' 
or c4 == 'S': + # raise NotImplementedError("received flag %r" % c0) + + m = WCStatus._rex_status.match(rest) + if not m: + if c7 == '*': + fn = rest.strip() + wcpath = rootwcpath.join(fn, abs=1) + rootstatus.update_available.append(wcpath) + continue + if line.lower().find('against revision:')!=-1: + update_rev = int(rest.split(':')[1].strip()) + continue + if line.lower().find('status on external') > -1: + # XXX not sure what to do here... perhaps we want to + # store some state instead of just continuing, as right + # now it makes the top-level external get added twice + # (once as external, once as 'normal' unchanged item) + # because of the way SVN presents external items + continue + # keep trying + raise ValueError("could not parse line %r" % line) + else: + rev, modrev, author, fn = m.groups() + wcpath = rootwcpath.join(fn, abs=1) + #assert wcpath.check() + if c0 == 'M': + assert wcpath.check(file=1), "didn't expect a directory with changed content here" + rootstatus.modified.append(wcpath) + elif c0 == 'A' or c3 == '+' : + rootstatus.added.append(wcpath) + elif c0 == 'D': + rootstatus.deleted.append(wcpath) + elif c0 == 'C': + rootstatus.conflict.append(wcpath) + elif c0 == '~': + rootstatus.kindmismatch.append(wcpath) + elif c0 == '!': + rootstatus.incomplete.append(wcpath) + elif c0 == 'R': + rootstatus.replaced.append(wcpath) + elif not c0.strip(): + rootstatus.unchanged.append(wcpath) + else: + raise NotImplementedError("received flag %r" % c0) + + if c1 == 'M': + rootstatus.prop_modified.append(wcpath) + # XXX do we cover all client versions here? 
+ if c2 == 'L' or c5 == 'K': + rootstatus.locked.append(wcpath) + if c7 == '*': + rootstatus.update_available.append(wcpath) + + if wcpath == rootwcpath: + rootstatus.rev = rev + rootstatus.modrev = modrev + rootstatus.author = author + if update_rev: + rootstatus.update_rev = update_rev + continue + return rootstatus + fromstring = staticmethod(fromstring) + +class XMLWCStatus(WCStatus): + def fromstring(data, rootwcpath, rev=None, modrev=None, author=None): + """ parse 'data' (XML string as outputted by svn st) into a status obj + """ + # XXX for externals, the path is shown twice: once + # with external information, and once with full info as if + # the item was a normal non-external... the current way of + # dealing with this issue is by ignoring it - this does make + # externals appear as external items as well as 'normal', + # unchanged ones in the status object so this is far from ideal + rootstatus = WCStatus(rootwcpath, rev, modrev, author) + update_rev = None + minidom, ExpatError = importxml() + try: + doc = minidom.parseString(data) + except ExpatError: + e = sys.exc_info()[1] + raise ValueError(str(e)) + urevels = doc.getElementsByTagName('against') + if urevels: + rootstatus.update_rev = urevels[-1].getAttribute('revision') + for entryel in doc.getElementsByTagName('entry'): + path = entryel.getAttribute('path') + statusel = entryel.getElementsByTagName('wc-status')[0] + itemstatus = statusel.getAttribute('item') + + if itemstatus == 'unversioned': + wcpath = rootwcpath.join(path, abs=1) + rootstatus.unknown.append(wcpath) + continue + elif itemstatus == 'external': + wcpath = rootwcpath.__class__( + rootwcpath.localpath.join(path, abs=1), + auth=rootwcpath.auth) + rootstatus.external.append(wcpath) + continue + elif itemstatus == 'ignored': + wcpath = rootwcpath.join(path, abs=1) + rootstatus.ignored.append(wcpath) + continue + elif itemstatus == 'incomplete': + wcpath = rootwcpath.join(path, abs=1) + rootstatus.incomplete.append(wcpath) + continue + 
+ rev = statusel.getAttribute('revision') + if itemstatus == 'added' or itemstatus == 'none': + rev = '0' + modrev = '?' + author = '?' + date = '' + elif itemstatus == "replaced": + pass + else: + #print entryel.toxml() + commitel = entryel.getElementsByTagName('commit')[0] + if commitel: + modrev = commitel.getAttribute('revision') + author = '' + author_els = commitel.getElementsByTagName('author') + if author_els: + for c in author_els[0].childNodes: + author += c.nodeValue + date = '' + for c in commitel.getElementsByTagName('date')[0]\ + .childNodes: + date += c.nodeValue + + wcpath = rootwcpath.join(path, abs=1) + + assert itemstatus != 'modified' or wcpath.check(file=1), ( + 'did\'t expect a directory with changed content here') + + itemattrname = { + 'normal': 'unchanged', + 'unversioned': 'unknown', + 'conflicted': 'conflict', + 'none': 'added', + }.get(itemstatus, itemstatus) + + attr = getattr(rootstatus, itemattrname) + attr.append(wcpath) + + propsstatus = statusel.getAttribute('props') + if propsstatus not in ('none', 'normal'): + rootstatus.prop_modified.append(wcpath) + + if wcpath == rootwcpath: + rootstatus.rev = rev + rootstatus.modrev = modrev + rootstatus.author = author + rootstatus.date = date + + # handle repos-status element (remote info) + rstatusels = entryel.getElementsByTagName('repos-status') + if rstatusels: + rstatusel = rstatusels[0] + ritemstatus = rstatusel.getAttribute('item') + if ritemstatus in ('added', 'modified'): + rootstatus.update_available.append(wcpath) + + lockels = entryel.getElementsByTagName('lock') + if len(lockels): + rootstatus.locked.append(wcpath) + + return rootstatus + fromstring = staticmethod(fromstring) + +class InfoSvnWCCommand: + def __init__(self, output): + # Path: test + # URL: http://codespeak.net/svn/std.path/trunk/dist/std.path/test + # Repository UUID: fd0d7bf2-dfb6-0310-8d31-b7ecfe96aada + # Revision: 2151 + # Node Kind: directory + # Schedule: normal + # Last Changed Author: hpk + # Last 
Changed Rev: 2100 + # Last Changed Date: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003) + # Properties Last Updated: 2003-11-03 14:47:48 +0100 (Mon, 03 Nov 2003) + + d = {} + for line in output.split('\n'): + if not line.strip(): + continue + key, value = line.split(':', 1) + key = key.lower().replace(' ', '') + value = value.strip() + d[key] = value + try: + self.url = d['url'] + except KeyError: + raise ValueError("Not a versioned resource") + #raise ValueError, "Not a versioned resource %r" % path + self.kind = d['nodekind'] == 'directory' and 'dir' or d['nodekind'] + try: + self.rev = int(d['revision']) + except KeyError: + self.rev = None + + self.path = py.path.local(d['path']) + self.size = self.path.size() + if 'lastchangedrev' in d: + self.created_rev = int(d['lastchangedrev']) + if 'lastchangedauthor' in d: + self.last_author = d['lastchangedauthor'] + if 'lastchangeddate' in d: + self.mtime = parse_wcinfotime(d['lastchangeddate']) + self.time = self.mtime * 1000000 + + def __eq__(self, other): + return self.__dict__ == other.__dict__ + +def parse_wcinfotime(timestr): + """ Returns seconds since epoch, UTC. """ + # example: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003) + m = re.match(r'(\d+-\d+-\d+ \d+:\d+:\d+) ([+-]\d+) .*', timestr) + if not m: + raise ValueError("timestring %r does not match" % timestr) + timestr, timezone = m.groups() + # do not handle timezone specially, return value should be UTC + parsedtime = time.strptime(timestr, "%Y-%m-%d %H:%M:%S") + return calendar.timegm(parsedtime) + +def make_recursive_propdict(wcroot, + output, + rex = re.compile("Properties on '(.*)':")): + """ Return a dictionary of path->PropListDict mappings. 
""" + lines = [x for x in output.split('\n') if x] + pdict = {} + while lines: + line = lines.pop(0) + m = rex.match(line) + if not m: + raise ValueError("could not parse propget-line: %r" % line) + path = m.groups()[0] + wcpath = wcroot.join(path, abs=1) + propnames = [] + while lines and lines[0].startswith(' '): + propname = lines.pop(0).strip() + propnames.append(propname) + assert propnames, "must have found properties!" + pdict[wcpath] = PropListDict(wcpath, propnames) + return pdict + + +def importxml(cache=[]): + if cache: + return cache + from xml.dom import minidom + from xml.parsers.expat import ExpatError + cache.extend([minidom, ExpatError]) + return cache + +class LogEntry: + def __init__(self, logentry): + self.rev = int(logentry.getAttribute('revision')) + for lpart in filter(None, logentry.childNodes): + if lpart.nodeType == lpart.ELEMENT_NODE: + if lpart.nodeName == 'author': + self.author = lpart.firstChild.nodeValue + elif lpart.nodeName == 'msg': + if lpart.firstChild: + self.msg = lpart.firstChild.nodeValue + else: + self.msg = '' + elif lpart.nodeName == 'date': + #2003-07-29T20:05:11.598637Z + timestr = lpart.firstChild.nodeValue + self.date = parse_apr_time(timestr) + elif lpart.nodeName == 'paths': + self.strpaths = [] + for ppart in filter(None, lpart.childNodes): + if ppart.nodeType == ppart.ELEMENT_NODE: + self.strpaths.append(PathEntry(ppart)) + def __repr__(self): + return '' % ( + self.rev, self.author, self.date) + + diff --git a/third_party/python/py/py/_process/__init__.py b/third_party/python/py/py/_process/__init__.py index 86c714ad1aed3..3186a2db398fe 100644 --- a/third_party/python/py/py/_process/__init__.py +++ b/third_party/python/py/py/_process/__init__.py @@ -1 +1 @@ -""" high-level sub-process handling """ +""" high-level sub-process handling """ diff --git a/third_party/python/py/py/_process/cmdexec.py b/third_party/python/py/py/_process/cmdexec.py index f83a249402968..22a1a82bd333a 100644 --- 
a/third_party/python/py/py/_process/cmdexec.py +++ b/third_party/python/py/py/_process/cmdexec.py @@ -1,49 +1,49 @@ -import sys -import subprocess -import py -from subprocess import Popen, PIPE - -def cmdexec(cmd): - """ return unicode output of executing 'cmd' in a separate process. - - raise cmdexec.Error exeception if the command failed. - the exception will provide an 'err' attribute containing - the error-output from the command. - if the subprocess module does not provide a proper encoding/unicode strings - sys.getdefaultencoding() will be used, if that does not exist, 'UTF-8'. - """ - process = subprocess.Popen(cmd, shell=True, - universal_newlines=True, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) - out, err = process.communicate() - if sys.version_info[0] < 3: # on py3 we get unicode strings, on py2 not - try: - default_encoding = sys.getdefaultencoding() # jython may not have it - except AttributeError: - default_encoding = sys.stdout.encoding or 'UTF-8' - out = unicode(out, process.stdout.encoding or default_encoding) - err = unicode(err, process.stderr.encoding or default_encoding) - status = process.poll() - if status: - raise ExecutionFailed(status, status, cmd, out, err) - return out - -class ExecutionFailed(py.error.Error): - def __init__(self, status, systemstatus, cmd, out, err): - Exception.__init__(self) - self.status = status - self.systemstatus = systemstatus - self.cmd = cmd - self.err = err - self.out = out - - def __str__(self): - return "ExecutionFailed: %d %s\n%s" %(self.status, self.cmd, self.err) - -# export the exception under the name 'py.process.cmdexec.Error' -cmdexec.Error = ExecutionFailed -try: - ExecutionFailed.__module__ = 'py.process.cmdexec' - ExecutionFailed.__name__ = 'Error' -except (AttributeError, TypeError): - pass +import sys +import subprocess +import py +from subprocess import Popen, PIPE + +def cmdexec(cmd): + """ return unicode output of executing 'cmd' in a separate process. 
+ + raise cmdexec.Error exeception if the command failed. + the exception will provide an 'err' attribute containing + the error-output from the command. + if the subprocess module does not provide a proper encoding/unicode strings + sys.getdefaultencoding() will be used, if that does not exist, 'UTF-8'. + """ + process = subprocess.Popen(cmd, shell=True, + universal_newlines=True, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = process.communicate() + if sys.version_info[0] < 3: # on py3 we get unicode strings, on py2 not + try: + default_encoding = sys.getdefaultencoding() # jython may not have it + except AttributeError: + default_encoding = sys.stdout.encoding or 'UTF-8' + out = unicode(out, process.stdout.encoding or default_encoding) + err = unicode(err, process.stderr.encoding or default_encoding) + status = process.poll() + if status: + raise ExecutionFailed(status, status, cmd, out, err) + return out + +class ExecutionFailed(py.error.Error): + def __init__(self, status, systemstatus, cmd, out, err): + Exception.__init__(self) + self.status = status + self.systemstatus = systemstatus + self.cmd = cmd + self.err = err + self.out = out + + def __str__(self): + return "ExecutionFailed: %d %s\n%s" %(self.status, self.cmd, self.err) + +# export the exception under the name 'py.process.cmdexec.Error' +cmdexec.Error = ExecutionFailed +try: + ExecutionFailed.__module__ = 'py.process.cmdexec' + ExecutionFailed.__name__ = 'Error' +except (AttributeError, TypeError): + pass diff --git a/third_party/python/py/py/_process/forkedfunc.py b/third_party/python/py/py/_process/forkedfunc.py index 1c285306884bf..d23e6532b412d 100644 --- a/third_party/python/py/py/_process/forkedfunc.py +++ b/third_party/python/py/py/_process/forkedfunc.py @@ -1,120 +1,120 @@ - -""" - ForkedFunc provides a way to run a function in a forked process - and get at its return value, stdout and stderr output as well - as signals and exitstatusus. 
-""" - -import py -import os -import sys -import marshal - - -def get_unbuffered_io(fd, filename): - f = open(str(filename), "w") - if fd != f.fileno(): - os.dup2(f.fileno(), fd) - class AutoFlush: - def write(self, data): - f.write(data) - f.flush() - def __getattr__(self, name): - return getattr(f, name) - return AutoFlush() - - -class ForkedFunc: - EXITSTATUS_EXCEPTION = 3 - - - def __init__(self, fun, args=None, kwargs=None, nice_level=0, - child_on_start=None, child_on_exit=None): - if args is None: - args = [] - if kwargs is None: - kwargs = {} - self.fun = fun - self.args = args - self.kwargs = kwargs - self.tempdir = tempdir = py.path.local.mkdtemp() - self.RETVAL = tempdir.ensure('retval') - self.STDOUT = tempdir.ensure('stdout') - self.STDERR = tempdir.ensure('stderr') - - pid = os.fork() - if pid: # in parent process - self.pid = pid - else: # in child process - self.pid = None - self._child(nice_level, child_on_start, child_on_exit) - - def _child(self, nice_level, child_on_start, child_on_exit): - # right now we need to call a function, but first we need to - # map all IO that might happen - sys.stdout = stdout = get_unbuffered_io(1, self.STDOUT) - sys.stderr = stderr = get_unbuffered_io(2, self.STDERR) - retvalf = self.RETVAL.open("wb") - EXITSTATUS = 0 - try: - if nice_level: - os.nice(nice_level) - try: - if child_on_start is not None: - child_on_start() - retval = self.fun(*self.args, **self.kwargs) - retvalf.write(marshal.dumps(retval)) - if child_on_exit is not None: - child_on_exit() - except: - excinfo = py.code.ExceptionInfo() - stderr.write(str(excinfo._getreprcrash())) - EXITSTATUS = self.EXITSTATUS_EXCEPTION - finally: - stdout.close() - stderr.close() - retvalf.close() - os.close(1) - os.close(2) - os._exit(EXITSTATUS) - - def waitfinish(self, waiter=os.waitpid): - pid, systemstatus = waiter(self.pid, 0) - if systemstatus: - if os.WIFSIGNALED(systemstatus): - exitstatus = os.WTERMSIG(systemstatus) + 128 - else: - exitstatus = 
os.WEXITSTATUS(systemstatus) - else: - exitstatus = 0 - signal = systemstatus & 0x7f - if not exitstatus and not signal: - retval = self.RETVAL.open('rb') - try: - retval_data = retval.read() - finally: - retval.close() - retval = marshal.loads(retval_data) - else: - retval = None - stdout = self.STDOUT.read() - stderr = self.STDERR.read() - self._removetemp() - return Result(exitstatus, signal, retval, stdout, stderr) - - def _removetemp(self): - if self.tempdir.check(): - self.tempdir.remove() - - def __del__(self): - if self.pid is not None: # only clean up in main process - self._removetemp() - - -class Result(object): - def __init__(self, exitstatus, signal, retval, stdout, stderr): - self.exitstatus = exitstatus - self.signal = signal - self.retval = retval - self.out = stdout - self.err = stderr + +""" + ForkedFunc provides a way to run a function in a forked process + and get at its return value, stdout and stderr output as well + as signals and exitstatusus. +""" + +import py +import os +import sys +import marshal + + +def get_unbuffered_io(fd, filename): + f = open(str(filename), "w") + if fd != f.fileno(): + os.dup2(f.fileno(), fd) + class AutoFlush: + def write(self, data): + f.write(data) + f.flush() + def __getattr__(self, name): + return getattr(f, name) + return AutoFlush() + + +class ForkedFunc: + EXITSTATUS_EXCEPTION = 3 + + + def __init__(self, fun, args=None, kwargs=None, nice_level=0, + child_on_start=None, child_on_exit=None): + if args is None: + args = [] + if kwargs is None: + kwargs = {} + self.fun = fun + self.args = args + self.kwargs = kwargs + self.tempdir = tempdir = py.path.local.mkdtemp() + self.RETVAL = tempdir.ensure('retval') + self.STDOUT = tempdir.ensure('stdout') + self.STDERR = tempdir.ensure('stderr') + + pid = os.fork() + if pid: # in parent process + self.pid = pid + else: # in child process + self.pid = None + self._child(nice_level, child_on_start, child_on_exit) + + def _child(self, nice_level, child_on_start, 
child_on_exit): + # right now we need to call a function, but first we need to + # map all IO that might happen + sys.stdout = stdout = get_unbuffered_io(1, self.STDOUT) + sys.stderr = stderr = get_unbuffered_io(2, self.STDERR) + retvalf = self.RETVAL.open("wb") + EXITSTATUS = 0 + try: + if nice_level: + os.nice(nice_level) + try: + if child_on_start is not None: + child_on_start() + retval = self.fun(*self.args, **self.kwargs) + retvalf.write(marshal.dumps(retval)) + if child_on_exit is not None: + child_on_exit() + except: + excinfo = py.code.ExceptionInfo() + stderr.write(str(excinfo._getreprcrash())) + EXITSTATUS = self.EXITSTATUS_EXCEPTION + finally: + stdout.close() + stderr.close() + retvalf.close() + os.close(1) + os.close(2) + os._exit(EXITSTATUS) + + def waitfinish(self, waiter=os.waitpid): + pid, systemstatus = waiter(self.pid, 0) + if systemstatus: + if os.WIFSIGNALED(systemstatus): + exitstatus = os.WTERMSIG(systemstatus) + 128 + else: + exitstatus = os.WEXITSTATUS(systemstatus) + else: + exitstatus = 0 + signal = systemstatus & 0x7f + if not exitstatus and not signal: + retval = self.RETVAL.open('rb') + try: + retval_data = retval.read() + finally: + retval.close() + retval = marshal.loads(retval_data) + else: + retval = None + stdout = self.STDOUT.read() + stderr = self.STDERR.read() + self._removetemp() + return Result(exitstatus, signal, retval, stdout, stderr) + + def _removetemp(self): + if self.tempdir.check(): + self.tempdir.remove() + + def __del__(self): + if self.pid is not None: # only clean up in main process + self._removetemp() + + +class Result(object): + def __init__(self, exitstatus, signal, retval, stdout, stderr): + self.exitstatus = exitstatus + self.signal = signal + self.retval = retval + self.out = stdout + self.err = stderr diff --git a/third_party/python/py/py/_process/killproc.py b/third_party/python/py/py/_process/killproc.py index 18e8310b5f6ca..5b6032c7bad1b 100644 --- a/third_party/python/py/py/_process/killproc.py +++ 
b/third_party/python/py/py/_process/killproc.py @@ -1,23 +1,23 @@ -import py -import os, sys - -if sys.platform == "win32" or getattr(os, '_name', '') == 'nt': - try: - import ctypes - except ImportError: - def dokill(pid): - py.process.cmdexec("taskkill /F /PID %d" %(pid,)) - else: - def dokill(pid): - PROCESS_TERMINATE = 1 - handle = ctypes.windll.kernel32.OpenProcess( - PROCESS_TERMINATE, False, pid) - ctypes.windll.kernel32.TerminateProcess(handle, -1) - ctypes.windll.kernel32.CloseHandle(handle) -else: - def dokill(pid): - os.kill(pid, 15) - -def kill(pid): - """ kill process by id. """ - dokill(pid) +import py +import os, sys + +if sys.platform == "win32" or getattr(os, '_name', '') == 'nt': + try: + import ctypes + except ImportError: + def dokill(pid): + py.process.cmdexec("taskkill /F /PID %d" %(pid,)) + else: + def dokill(pid): + PROCESS_TERMINATE = 1 + handle = ctypes.windll.kernel32.OpenProcess( + PROCESS_TERMINATE, False, pid) + ctypes.windll.kernel32.TerminateProcess(handle, -1) + ctypes.windll.kernel32.CloseHandle(handle) +else: + def dokill(pid): + os.kill(pid, 15) + +def kill(pid): + """ kill process by id. """ + dokill(pid) diff --git a/third_party/python/py/py/_std.py b/third_party/python/py/py/_std.py index 97a9853323bf0..e016bc80f0e8a 100644 --- a/third_party/python/py/py/_std.py +++ b/third_party/python/py/py/_std.py @@ -1,18 +1,18 @@ -import sys - -class Std(object): - """ makes top-level python modules available as an attribute, - importing them on first access. - """ - - def __init__(self): - self.__dict__ = sys.modules - - def __getattr__(self, name): - try: - m = __import__(name) - except ImportError: - raise AttributeError("py.std: could not import %s" % name) - return m - -std = Std() +import sys + +class Std(object): + """ makes top-level python modules available as an attribute, + importing them on first access. 
+ """ + + def __init__(self): + self.__dict__ = sys.modules + + def __getattr__(self, name): + try: + m = __import__(name) + except ImportError: + raise AttributeError("py.std: could not import %s" % name) + return m + +std = Std() diff --git a/third_party/python/py/py/_xmlgen.py b/third_party/python/py/py/_xmlgen.py index 2ffcaa14b8ec3..f4d411654a374 100644 --- a/third_party/python/py/py/_xmlgen.py +++ b/third_party/python/py/py/_xmlgen.py @@ -1,253 +1,255 @@ -""" -module for generating and serializing xml and html structures -by using simple python objects. - -(c) holger krekel, holger at merlinux eu. 2009 -""" -import sys, re - -if sys.version_info >= (3,0): - def u(s): - return s - def unicode(x, errors=None): - if hasattr(x, '__unicode__'): - return x.__unicode__() - return str(x) -else: - def u(s): - return unicode(s) - unicode = unicode - - -class NamespaceMetaclass(type): - def __getattr__(self, name): - if name[:1] == '_': - raise AttributeError(name) - if self == Namespace: - raise ValueError("Namespace class is abstract") - tagspec = self.__tagspec__ - if tagspec is not None and name not in tagspec: - raise AttributeError(name) - classattr = {} - if self.__stickyname__: - classattr['xmlname'] = name - cls = type(name, (self.__tagclass__,), classattr) - setattr(self, name, cls) - return cls - -class Tag(list): - class Attr(object): - def __init__(self, **kwargs): - self.__dict__.update(kwargs) - - def __init__(self, *args, **kwargs): - super(Tag, self).__init__(args) - self.attr = self.Attr(**kwargs) - - def __unicode__(self): - return self.unicode(indent=0) - __str__ = __unicode__ - - def unicode(self, indent=2): - l = [] - SimpleUnicodeVisitor(l.append, indent).visit(self) - return u("").join(l) - - def __repr__(self): - name = self.__class__.__name__ - return "<%r tag object %d>" % (name, id(self)) - -Namespace = NamespaceMetaclass('Namespace', (object, ), { - '__tagspec__': None, - '__tagclass__': Tag, - '__stickyname__': False, -}) - -class 
HtmlTag(Tag): - def unicode(self, indent=2): - l = [] - HtmlVisitor(l.append, indent, shortempty=False).visit(self) - return u("").join(l) - -# exported plain html namespace -class html(Namespace): - __tagclass__ = HtmlTag - __stickyname__ = True - __tagspec__ = dict([(x,1) for x in ( - 'a,abbr,acronym,address,applet,area,b,bdo,big,blink,' - 'blockquote,body,br,button,caption,center,cite,code,col,' - 'colgroup,comment,dd,del,dfn,dir,div,dl,dt,em,embed,' - 'fieldset,font,form,frameset,h1,h2,h3,h4,h5,h6,head,html,' - 'i,iframe,img,input,ins,kbd,label,legend,li,link,listing,' - 'map,marquee,menu,meta,multicol,nobr,noembed,noframes,' - 'noscript,object,ol,optgroup,option,p,pre,q,s,script,' - 'select,small,span,strike,strong,style,sub,sup,table,' - 'tbody,td,textarea,tfoot,th,thead,title,tr,tt,u,ul,xmp,' - 'base,basefont,frame,hr,isindex,param,samp,var' - ).split(',') if x]) - - class Style(object): - def __init__(self, **kw): - for x, y in kw.items(): - x = x.replace('_', '-') - setattr(self, x, y) - - -class raw(object): - """just a box that can contain a unicode string that will be - included directly in the output""" - def __init__(self, uniobj): - self.uniobj = uniobj - -class SimpleUnicodeVisitor(object): - """ recursive visitor to write unicode. """ - def __init__(self, write, indent=0, curindent=0, shortempty=True): - self.write = write - self.cache = {} - self.visited = {} # for detection of recursion - self.indent = indent - self.curindent = curindent - self.parents = [] - self.shortempty = shortempty # short empty tags or not - - def visit(self, node): - """ dispatcher on node's class/bases name. 
""" - cls = node.__class__ - try: - visitmethod = self.cache[cls] - except KeyError: - for subclass in cls.__mro__: - visitmethod = getattr(self, subclass.__name__, None) - if visitmethod is not None: - break - else: - visitmethod = self.__object - self.cache[cls] = visitmethod - visitmethod(node) - - # the default fallback handler is marked private - # to avoid clashes with the tag name object - def __object(self, obj): - #self.write(obj) - self.write(escape(unicode(obj))) - - def raw(self, obj): - self.write(obj.uniobj) - - def list(self, obj): - assert id(obj) not in self.visited - self.visited[id(obj)] = 1 - for elem in obj: - self.visit(elem) - - def Tag(self, tag): - assert id(tag) not in self.visited - try: - tag.parent = self.parents[-1] - except IndexError: - tag.parent = None - self.visited[id(tag)] = 1 - tagname = getattr(tag, 'xmlname', tag.__class__.__name__) - if self.curindent and not self._isinline(tagname): - self.write("\n" + u(' ') * self.curindent) - if tag: - self.curindent += self.indent - self.write(u('<%s%s>') % (tagname, self.attributes(tag))) - self.parents.append(tag) - for x in tag: - self.visit(x) - self.parents.pop() - self.write(u('') % tagname) - self.curindent -= self.indent - else: - nameattr = tagname+self.attributes(tag) - if self._issingleton(tagname): - self.write(u('<%s/>') % (nameattr,)) - else: - self.write(u('<%s>') % (nameattr, tagname)) - - def attributes(self, tag): - # serialize attributes - attrlist = dir(tag.attr) - attrlist.sort() - l = [] - for name in attrlist: - res = self.repr_attribute(tag.attr, name) - if res is not None: - l.append(res) - l.extend(self.getstyle(tag)) - return u("").join(l) - - def repr_attribute(self, attrs, name): - if name[:2] != '__': - value = getattr(attrs, name) - if name.endswith('_'): - name = name[:-1] - if isinstance(value, raw): - insert = value.uniobj - else: - insert = escape(unicode(value)) - return ' %s="%s"' % (name, insert) - - def getstyle(self, tag): - """ return attribute 
list suitable for styling. """ - try: - styledict = tag.style.__dict__ - except AttributeError: - return [] - else: - stylelist = [x+': ' + y for x,y in styledict.items()] - return [u(' style="%s"') % u('; ').join(stylelist)] - - def _issingleton(self, tagname): - """can (and will) be overridden in subclasses""" - return self.shortempty - - def _isinline(self, tagname): - """can (and will) be overridden in subclasses""" - return False - -class HtmlVisitor(SimpleUnicodeVisitor): - - single = dict([(x, 1) for x in - ('br,img,area,param,col,hr,meta,link,base,' - 'input,frame').split(',')]) - inline = dict([(x, 1) for x in - ('a abbr acronym b basefont bdo big br cite code dfn em font ' - 'i img input kbd label q s samp select small span strike ' - 'strong sub sup textarea tt u var'.split(' '))]) - - def repr_attribute(self, attrs, name): - if name == 'class_': - value = getattr(attrs, name) - if value is None: - return - return super(HtmlVisitor, self).repr_attribute(attrs, name) - - def _issingleton(self, tagname): - return tagname in self.single - - def _isinline(self, tagname): - return tagname in self.inline - - -class _escape: - def __init__(self): - self.escape = { - u('"') : u('"'), u('<') : u('<'), u('>') : u('>'), - u('&') : u('&'), u("'") : u('''), - } - self.charef_rex = re.compile(u("|").join(self.escape.keys())) - - def _replacer(self, match): - return self.escape[match.group(0)] - - def __call__(self, ustring): - """ xml-escape the given unicode string. """ - try: - ustring = unicode(ustring) - except UnicodeDecodeError: - ustring = unicode(ustring, 'utf-8', errors='replace') - return self.charef_rex.sub(self._replacer, ustring) - -escape = _escape() +""" +module for generating and serializing xml and html structures +by using simple python objects. + +(c) holger krekel, holger at merlinux eu. 
2009 +""" +import sys, re + +if sys.version_info >= (3,0): + def u(s): + return s + def unicode(x, errors=None): + if hasattr(x, '__unicode__'): + return x.__unicode__() + return str(x) +else: + def u(s): + return unicode(s) + unicode = unicode + + +class NamespaceMetaclass(type): + def __getattr__(self, name): + if name[:1] == '_': + raise AttributeError(name) + if self == Namespace: + raise ValueError("Namespace class is abstract") + tagspec = self.__tagspec__ + if tagspec is not None and name not in tagspec: + raise AttributeError(name) + classattr = {} + if self.__stickyname__: + classattr['xmlname'] = name + cls = type(name, (self.__tagclass__,), classattr) + setattr(self, name, cls) + return cls + +class Tag(list): + class Attr(object): + def __init__(self, **kwargs): + self.__dict__.update(kwargs) + + def __init__(self, *args, **kwargs): + super(Tag, self).__init__(args) + self.attr = self.Attr(**kwargs) + + def __unicode__(self): + return self.unicode(indent=0) + __str__ = __unicode__ + + def unicode(self, indent=2): + l = [] + SimpleUnicodeVisitor(l.append, indent).visit(self) + return u("").join(l) + + def __repr__(self): + name = self.__class__.__name__ + return "<%r tag object %d>" % (name, id(self)) + +Namespace = NamespaceMetaclass('Namespace', (object, ), { + '__tagspec__': None, + '__tagclass__': Tag, + '__stickyname__': False, +}) + +class HtmlTag(Tag): + def unicode(self, indent=2): + l = [] + HtmlVisitor(l.append, indent, shortempty=False).visit(self) + return u("").join(l) + +# exported plain html namespace +class html(Namespace): + __tagclass__ = HtmlTag + __stickyname__ = True + __tagspec__ = dict([(x,1) for x in ( + 'a,abbr,acronym,address,applet,area,article,aside,audio,b,' + 'base,basefont,bdi,bdo,big,blink,blockquote,body,br,button,' + 'canvas,caption,center,cite,code,col,colgroup,command,comment,' + 'datalist,dd,del,details,dfn,dir,div,dl,dt,em,embed,' + 'fieldset,figcaption,figure,footer,font,form,frame,frameset,h1,' + 
'h2,h3,h4,h5,h6,head,header,hgroup,hr,html,i,iframe,img,input,' + 'ins,isindex,kbd,keygen,label,legend,li,link,listing,map,mark,' + 'marquee,menu,meta,meter,multicol,nav,nobr,noembed,noframes,' + 'noscript,object,ol,optgroup,option,output,p,param,pre,progress,' + 'q,rp,rt,ruby,s,samp,script,section,select,small,source,span,' + 'strike,strong,style,sub,summary,sup,table,tbody,td,textarea,' + 'tfoot,th,thead,time,title,tr,track,tt,u,ul,xmp,var,video,wbr' + ).split(',') if x]) + + class Style(object): + def __init__(self, **kw): + for x, y in kw.items(): + x = x.replace('_', '-') + setattr(self, x, y) + + +class raw(object): + """just a box that can contain a unicode string that will be + included directly in the output""" + def __init__(self, uniobj): + self.uniobj = uniobj + +class SimpleUnicodeVisitor(object): + """ recursive visitor to write unicode. """ + def __init__(self, write, indent=0, curindent=0, shortempty=True): + self.write = write + self.cache = {} + self.visited = {} # for detection of recursion + self.indent = indent + self.curindent = curindent + self.parents = [] + self.shortempty = shortempty # short empty tags or not + + def visit(self, node): + """ dispatcher on node's class/bases name. 
""" + cls = node.__class__ + try: + visitmethod = self.cache[cls] + except KeyError: + for subclass in cls.__mro__: + visitmethod = getattr(self, subclass.__name__, None) + if visitmethod is not None: + break + else: + visitmethod = self.__object + self.cache[cls] = visitmethod + visitmethod(node) + + # the default fallback handler is marked private + # to avoid clashes with the tag name object + def __object(self, obj): + #self.write(obj) + self.write(escape(unicode(obj))) + + def raw(self, obj): + self.write(obj.uniobj) + + def list(self, obj): + assert id(obj) not in self.visited + self.visited[id(obj)] = 1 + for elem in obj: + self.visit(elem) + + def Tag(self, tag): + assert id(tag) not in self.visited + try: + tag.parent = self.parents[-1] + except IndexError: + tag.parent = None + self.visited[id(tag)] = 1 + tagname = getattr(tag, 'xmlname', tag.__class__.__name__) + if self.curindent and not self._isinline(tagname): + self.write("\n" + u(' ') * self.curindent) + if tag: + self.curindent += self.indent + self.write(u('<%s%s>') % (tagname, self.attributes(tag))) + self.parents.append(tag) + for x in tag: + self.visit(x) + self.parents.pop() + self.write(u('') % tagname) + self.curindent -= self.indent + else: + nameattr = tagname+self.attributes(tag) + if self._issingleton(tagname): + self.write(u('<%s/>') % (nameattr,)) + else: + self.write(u('<%s>') % (nameattr, tagname)) + + def attributes(self, tag): + # serialize attributes + attrlist = dir(tag.attr) + attrlist.sort() + l = [] + for name in attrlist: + res = self.repr_attribute(tag.attr, name) + if res is not None: + l.append(res) + l.extend(self.getstyle(tag)) + return u("").join(l) + + def repr_attribute(self, attrs, name): + if name[:2] != '__': + value = getattr(attrs, name) + if name.endswith('_'): + name = name[:-1] + if isinstance(value, raw): + insert = value.uniobj + else: + insert = escape(unicode(value)) + return ' %s="%s"' % (name, insert) + + def getstyle(self, tag): + """ return attribute 
list suitable for styling. """ + try: + styledict = tag.style.__dict__ + except AttributeError: + return [] + else: + stylelist = [x+': ' + y for x,y in styledict.items()] + return [u(' style="%s"') % u('; ').join(stylelist)] + + def _issingleton(self, tagname): + """can (and will) be overridden in subclasses""" + return self.shortempty + + def _isinline(self, tagname): + """can (and will) be overridden in subclasses""" + return False + +class HtmlVisitor(SimpleUnicodeVisitor): + + single = dict([(x, 1) for x in + ('br,img,area,param,col,hr,meta,link,base,' + 'input,frame').split(',')]) + inline = dict([(x, 1) for x in + ('a abbr acronym b basefont bdo big br cite code dfn em font ' + 'i img input kbd label q s samp select small span strike ' + 'strong sub sup textarea tt u var'.split(' '))]) + + def repr_attribute(self, attrs, name): + if name == 'class_': + value = getattr(attrs, name) + if value is None: + return + return super(HtmlVisitor, self).repr_attribute(attrs, name) + + def _issingleton(self, tagname): + return tagname in self.single + + def _isinline(self, tagname): + return tagname in self.inline + + +class _escape: + def __init__(self): + self.escape = { + u('"') : u('"'), u('<') : u('<'), u('>') : u('>'), + u('&') : u('&'), u("'") : u('''), + } + self.charef_rex = re.compile(u("|").join(self.escape.keys())) + + def _replacer(self, match): + return self.escape[match.group(0)] + + def __call__(self, ustring): + """ xml-escape the given unicode string. 
""" + try: + ustring = unicode(ustring) + except UnicodeDecodeError: + ustring = unicode(ustring, 'utf-8', errors='replace') + return self.charef_rex.sub(self._replacer, ustring) + +escape = _escape() diff --git a/third_party/python/py/py/test.py b/third_party/python/py/py/test.py index aa5beb1789f05..84e888171a43e 100644 --- a/third_party/python/py/py/test.py +++ b/third_party/python/py/py/test.py @@ -1,10 +1,10 @@ -import sys -if __name__ == '__main__': - import pytest - sys.exit(pytest.main()) -else: - import sys, pytest - sys.modules['py.test'] = pytest - -# for more API entry points see the 'tests' definition -# in __init__.py +import sys +if __name__ == '__main__': + import pytest + sys.exit(pytest.main()) +else: + import sys, pytest + sys.modules['py.test'] = pytest + +# for more API entry points see the 'tests' definition +# in __init__.py diff --git a/third_party/python/py/setup.cfg b/third_party/python/py/setup.cfg deleted file mode 100644 index be0b2a5c8a141..0000000000000 --- a/third_party/python/py/setup.cfg +++ /dev/null @@ -1,11 +0,0 @@ -[wheel] -universal = 1 - -[devpi:upload] -formats = sdist.tgz,bdist_wheel - -[egg_info] -tag_build = -tag_date = 0 -tag_svn_revision = 0 - diff --git a/third_party/python/py/setup.py b/third_party/python/py/setup.py deleted file mode 100644 index 06f0885cd71a9..0000000000000 --- a/third_party/python/py/setup.py +++ /dev/null @@ -1,38 +0,0 @@ -import os, sys - -from setuptools import setup - -def main(): - setup( - name='py', - description='library with cross-python path, ini-parsing, io, code, log facilities', - long_description = open('README.txt').read(), - version='1.4.31', - url='http://pylib.readthedocs.org/', - license='MIT license', - platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], - author='holger krekel, Ronny Pfannschmidt, Benjamin Peterson and others', - author_email='pytest-dev@python.org', - classifiers=['Development Status :: 6 - Mature', - 'Intended Audience :: Developers', - 'License :: OSI 
Approved :: MIT License', - 'Operating System :: POSIX', - 'Operating System :: Microsoft :: Windows', - 'Operating System :: MacOS :: MacOS X', - 'Topic :: Software Development :: Testing', - 'Topic :: Software Development :: Libraries', - 'Topic :: Utilities', - 'Programming Language :: Python', - 'Programming Language :: Python :: 3'], - packages=['py', - 'py._code', - 'py._io', - 'py._log', - 'py._path', - 'py._process', - ], - zip_safe=False, - ) - -if __name__ == '__main__': - main() diff --git a/third_party/python/pytest/.coveragerc b/third_party/python/pytest/.coveragerc deleted file mode 100644 index 27db64e09c125..0000000000000 --- a/third_party/python/pytest/.coveragerc +++ /dev/null @@ -1,7 +0,0 @@ -[run] -omit = - # standlonetemplate is read dynamically and tested by test_genscript - *standalonetemplate.py - # oldinterpret could be removed, as it is no longer used in py26+ - *oldinterpret.py - vendored_packages diff --git a/third_party/python/pytest/AUTHORS b/third_party/python/pytest/AUTHORS deleted file mode 100644 index f4a21b22dd4a3..0000000000000 --- a/third_party/python/pytest/AUTHORS +++ /dev/null @@ -1,91 +0,0 @@ -Holger Krekel, holger at merlinux eu -merlinux GmbH, Germany, office at merlinux eu - -Contributors include:: - -Abhijeet Kasurde -Anatoly Bubenkoff -Andreas Zeidler -Andy Freeland -Anthon van der Neut -Armin Rigo -Aron Curzon -Aviv Palivoda -Benjamin Peterson -Bob Ippolito -Brian Dorsey -Brian Okken -Brianna Laugher -Bruno Oliveira -Carl Friedrich Bolz -Charles Cloud -Chris Lamb -Christian Theunert -Christian Tismer -Christopher Gilling -Daniel Grana -Daniel Hahler -Daniel Nuri -Dave Hunt -David Mohr -David Vierra -Edison Gustavo Muenz -Eduardo Schettino -Endre Galaczi -Elizaveta Shashkova -Eric Hunsberger -Eric Siegerman -Erik M. Bray -Florian Bruhin -Floris Bruynooghe -Gabriel Reis -Georgy Dyuldin -Graham Horler -Grig Gheorghiu -Guido Wesdorp -Harald Armin Massa -Ian Bicking -Jaap Broekhuizen -Jan Balster -Janne Vanhala -Jason R. 
Coombs -John Towler -Joshua Bronson -Jurko Gospodnetić -Katarzyna Jachim -Kevin Cox -Lee Kamentsky -Lukas Bednar -Maciek Fijalkowski -Maho -Marc Schlaich -Mark Abramowitz -Markus Unterwaditzer -Martijn Faassen -Martin Prusse -Matt Bachmann -Michael Aquilina -Michael Birtwell -Michael Droettboom -Nicolas Delaby -Pieter Mulder -Piotr Banaszkiewicz -Punyashloka Biswal -Quentin Pradet -Ralf Schmitt -Raphael Pierzina -Ronny Pfannschmidt -Ross Lawley -Ryan Wooden -Samuele Pedroni -Tom Viner -Trevor Bekolay -Wouter van Ackooy -David Díaz-Barquero -Eric Hunsberger -Simon Gomizelj -Russel Winder -Ben Webb -Alexei Kozlenok -Cal Leeming -Feng Ma diff --git a/third_party/python/pytest/LICENSE b/third_party/python/pytest/LICENSE deleted file mode 100644 index 9e27bd7841951..0000000000000 --- a/third_party/python/pytest/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2004-2016 Holger Krekel and others - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/third_party/python/pytest/MANIFEST.in b/third_party/python/pytest/MANIFEST.in deleted file mode 100644 index 266a9184dc369..0000000000000 --- a/third_party/python/pytest/MANIFEST.in +++ /dev/null @@ -1,34 +0,0 @@ -include CHANGELOG.rst -include LICENSE -include AUTHORS - -include README.rst -include CONTRIBUTING.rst - -include tox.ini -include setup.py - -include .coveragerc - -include plugin-test.sh -include requirements-docs.txt -include runtox.py - -recursive-include bench *.py -recursive-include extra *.py - -graft testing -graft doc - -exclude _pytest/impl - -graft _pytest/vendored_packages - -recursive-exclude * *.pyc *.pyo - -exclude appveyor/install.ps1 -exclude appveyor.yml -exclude appveyor - -exclude ISSUES.txt -exclude HOWTORELEASE.rst diff --git a/third_party/python/pytest/PKG-INFO b/third_party/python/pytest/PKG-INFO deleted file mode 100644 index 7b801be0d7a6f..0000000000000 --- a/third_party/python/pytest/PKG-INFO +++ /dev/null @@ -1,133 +0,0 @@ -Metadata-Version: 1.1 -Name: pytest -Version: 2.9.2 -Summary: pytest: simple powerful testing with Python -Home-page: http://pytest.org -Author: Holger Krekel, Bruno Oliveira, Ronny Pfannschmidt, Floris Bruynooghe, Brianna Laugher, Florian Bruhin and others -Author-email: holger at merlinux.eu -License: MIT license -Description: .. image:: http://pytest.org/latest/_static/pytest1.png - :target: http://pytest.org - :align: center - :alt: pytest - - ------ - - .. image:: https://img.shields.io/pypi/v/pytest.svg - :target: https://pypi.python.org/pypi/pytest - .. image:: https://img.shields.io/pypi/pyversions/pytest.svg - :target: https://pypi.python.org/pypi/pytest - .. image:: https://img.shields.io/coveralls/pytest-dev/pytest/master.svg - :target: https://coveralls.io/r/pytest-dev/pytest - .. image:: https://travis-ci.org/pytest-dev/pytest.svg?branch=master - :target: https://travis-ci.org/pytest-dev/pytest - .. 
image:: https://ci.appveyor.com/api/projects/status/mrgbjaua7t33pg6b?svg=true - :target: https://ci.appveyor.com/project/pytestbot/pytest - - The ``pytest`` framework makes it easy to write small tests, yet - scales to support complex functional testing for applications and libraries. - - An example of a simple test: - - .. code-block:: python - - # content of test_sample.py - def func(x): - return x + 1 - - def test_answer(): - assert func(3) == 5 - - - To execute it:: - - $ py.test - ======= test session starts ======== - platform linux -- Python 3.4.3, pytest-2.8.5, py-1.4.31, pluggy-0.3.1 - collected 1 items - - test_sample.py F - - ======= FAILURES ======== - _______ test_answer ________ - - def test_answer(): - > assert func(3) == 5 - E assert 4 == 5 - E + where 4 = func(3) - - test_sample.py:5: AssertionError - ======= 1 failed in 0.12 seconds ======== - - Due to ``py.test``'s detailed assertion introspection, only plain ``assert`` statements are used. See `getting-started `_ for more examples. - - - Features - -------- - - - Detailed info on failing `assert statements `_ (no need to remember ``self.assert*`` names); - - - `Auto-discovery - `_ - of test modules and functions; - - - `Modular fixtures `_ for - managing small or parametrized long-lived test resources; - - - Can run `unittest `_ (or trial), - `nose `_ test suites out of the box; - - - Python2.6+, Python3.2+, PyPy-2.3, Jython-2.5 (untested); - - - Rich plugin architecture, with over 150+ `external plugins `_ and thriving community; - - - Documentation - ------------- - - For full documentation, including installation, tutorials and PDF documents, please see http://pytest.org. - - - Bugs/Requests - ------------- - - Please use the `GitHub issue tracker `_ to submit bugs or request features. - - - Changelog - --------- - - Consult the `Changelog `_ page for fixes and enhancements of each version. - - - License - ------- - - Copyright Holger Krekel and others, 2004-2016. 
- - Distributed under the terms of the `MIT`_ license, pytest is free and open source software. - - .. _`MIT`: https://github.com/pytest-dev/pytest/blob/master/LICENSE - -Platform: unix -Platform: linux -Platform: osx -Platform: cygwin -Platform: win32 -Classifier: Development Status :: 6 - Mature -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: MIT License -Classifier: Operating System :: POSIX -Classifier: Operating System :: Microsoft :: Windows -Classifier: Operating System :: MacOS :: MacOS X -Classifier: Topic :: Software Development :: Testing -Classifier: Topic :: Software Development :: Libraries -Classifier: Topic :: Utilities -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.6 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.2 -Classifier: Programming Language :: Python :: 3.3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 diff --git a/third_party/python/pytest/README.rst b/third_party/python/pytest/README.rst deleted file mode 100644 index 68fc92211d8be..0000000000000 --- a/third_party/python/pytest/README.rst +++ /dev/null @@ -1,102 +0,0 @@ -.. image:: http://pytest.org/latest/_static/pytest1.png - :target: http://pytest.org - :align: center - :alt: pytest - ------- - -.. image:: https://img.shields.io/pypi/v/pytest.svg - :target: https://pypi.python.org/pypi/pytest -.. image:: https://img.shields.io/pypi/pyversions/pytest.svg - :target: https://pypi.python.org/pypi/pytest -.. image:: https://img.shields.io/coveralls/pytest-dev/pytest/master.svg - :target: https://coveralls.io/r/pytest-dev/pytest -.. image:: https://travis-ci.org/pytest-dev/pytest.svg?branch=master - :target: https://travis-ci.org/pytest-dev/pytest -.. 
image:: https://ci.appveyor.com/api/projects/status/mrgbjaua7t33pg6b?svg=true - :target: https://ci.appveyor.com/project/pytestbot/pytest - -The ``pytest`` framework makes it easy to write small tests, yet -scales to support complex functional testing for applications and libraries. - -An example of a simple test: - -.. code-block:: python - - # content of test_sample.py - def func(x): - return x + 1 - - def test_answer(): - assert func(3) == 5 - - -To execute it:: - - $ py.test - ======= test session starts ======== - platform linux -- Python 3.4.3, pytest-2.8.5, py-1.4.31, pluggy-0.3.1 - collected 1 items - - test_sample.py F - - ======= FAILURES ======== - _______ test_answer ________ - - def test_answer(): - > assert func(3) == 5 - E assert 4 == 5 - E + where 4 = func(3) - - test_sample.py:5: AssertionError - ======= 1 failed in 0.12 seconds ======== - -Due to ``py.test``'s detailed assertion introspection, only plain ``assert`` statements are used. See `getting-started `_ for more examples. - - -Features --------- - -- Detailed info on failing `assert statements `_ (no need to remember ``self.assert*`` names); - -- `Auto-discovery - `_ - of test modules and functions; - -- `Modular fixtures `_ for - managing small or parametrized long-lived test resources; - -- Can run `unittest `_ (or trial), - `nose `_ test suites out of the box; - -- Python2.6+, Python3.2+, PyPy-2.3, Jython-2.5 (untested); - -- Rich plugin architecture, with over 150+ `external plugins `_ and thriving community; - - -Documentation -------------- - -For full documentation, including installation, tutorials and PDF documents, please see http://pytest.org. - - -Bugs/Requests -------------- - -Please use the `GitHub issue tracker `_ to submit bugs or request features. - - -Changelog ---------- - -Consult the `Changelog `_ page for fixes and enhancements of each version. - - -License -------- - -Copyright Holger Krekel and others, 2004-2016. 
- -Distributed under the terms of the `MIT`_ license, pytest is free and open source software. - -.. _`MIT`: https://github.com/pytest-dev/pytest/blob/master/LICENSE diff --git a/third_party/python/pytest/_pytest/__init__.py b/third_party/python/pytest/_pytest/__init__.py index 23dac6d0559c5..6e41f0504e470 100644 --- a/third_party/python/pytest/_pytest/__init__.py +++ b/third_party/python/pytest/_pytest/__init__.py @@ -1,2 +1,8 @@ -# -__version__ = '2.9.2' +__all__ = ['__version__'] + +try: + from ._version import version as __version__ +except ImportError: + # broken installation, we don't even try + # unknown only works because we do poor mans version compare + __version__ = 'unknown' diff --git a/third_party/python/pytest/_pytest/_argcomplete.py b/third_party/python/pytest/_pytest/_argcomplete.py index 955855a964819..8c93e4c92ce35 100644 --- a/third_party/python/pytest/_pytest/_argcomplete.py +++ b/third_party/python/pytest/_pytest/_argcomplete.py @@ -57,7 +57,7 @@ which should throw a KeyError: 'COMPLINE' (which is properly set by the global argcomplete script). 
""" - +from __future__ import absolute_import, division, print_function import sys import os from glob import glob @@ -87,6 +87,7 @@ def __call__(self, prefix, **kwargs): completion.append(x[prefix_dir:]) return completion + if os.environ.get('_ARGCOMPLETE'): try: import argcomplete.completers diff --git a/third_party/python/pytest/_pytest/_code/__init__.py b/third_party/python/pytest/_pytest/_code/__init__.py index c046b9716caed..815c13b42c25b 100644 --- a/third_party/python/pytest/_pytest/_code/__init__.py +++ b/third_party/python/pytest/_pytest/_code/__init__.py @@ -1,12 +1,10 @@ """ python inspection/code generation API """ +from __future__ import absolute_import, division, print_function from .code import Code # noqa from .code import ExceptionInfo # noqa from .code import Frame # noqa from .code import Traceback # noqa from .code import getrawcode # noqa -from .code import patch_builtins # noqa -from .code import unpatch_builtins # noqa from .source import Source # noqa from .source import compile_ as compile # noqa from .source import getfslineno # noqa - diff --git a/third_party/python/pytest/_pytest/_code/_py2traceback.py b/third_party/python/pytest/_pytest/_code/_py2traceback.py index a830d9899ae0c..d45ee01fa79a3 100644 --- a/third_party/python/pytest/_pytest/_code/_py2traceback.py +++ b/third_party/python/pytest/_pytest/_code/_py2traceback.py @@ -2,6 +2,7 @@ # CHANGES: # - some_str is replaced, trying to create unicode strings # +from __future__ import absolute_import, division, print_function import types def format_exception_only(etype, value): diff --git a/third_party/python/pytest/_pytest/_code/code.py b/third_party/python/pytest/_pytest/_code/code.py index 8995cc1f7107e..5b7cc41911e11 100644 --- a/third_party/python/pytest/_pytest/_code/code.py +++ b/third_party/python/pytest/_pytest/_code/code.py @@ -1,17 +1,21 @@ +from __future__ import absolute_import, division, print_function import sys from inspect import CO_VARARGS, CO_VARKEYWORDS +import re 
+from weakref import ref +from _pytest.compat import _PY2, _PY3, PY35, safe_str import py - builtin_repr = repr reprlib = py.builtin._tryimport('repr', 'reprlib') -if sys.version_info[0] >= 3: +if _PY3: from traceback import format_exception_only else: from ._py2traceback import format_exception_only + class Code(object): """ wrapper around Python code objects """ def __init__(self, rawcode): @@ -28,6 +32,8 @@ def __init__(self, rawcode): def __eq__(self, other): return self.raw == other.raw + __hash__ = None + def __ne__(self, other): return not self == other @@ -35,12 +41,16 @@ def __ne__(self, other): def path(self): """ return a path object pointing to source code (note that it might not point to an actually existing file). """ - p = py.path.local(self.raw.co_filename) - # maybe don't try this checking - if not p.check(): + try: + p = py.path.local(self.raw.co_filename) + # maybe don't try this checking + if not p.check(): + raise OSError("py.path check failed.") + except OSError: # XXX maybe try harder like the weird logic # in the standard lib [linecache.updatecache] does? 
p = self.raw.co_filename + return p @property @@ -139,7 +149,8 @@ class TracebackEntry(object): _repr_style = None exprinfo = None - def __init__(self, rawentry): + def __init__(self, rawentry, excinfo=None): + self._excinfo = excinfo self._rawentry = rawentry self.lineno = rawentry.tb_lineno - 1 @@ -174,18 +185,6 @@ def getlocals(self): return self.frame.f_locals locals = property(getlocals, None, None, "locals of underlaying frame") - def reinterpret(self): - """Reinterpret the failing statement and returns a detailed information - about what operations are performed.""" - from _pytest.assertion.reinterpret import reinterpret - if self.exprinfo is None: - source = py.builtin._totext(self.statement).strip() - x = reinterpret(source, self.frame, should_fail=True) - if not py.builtin._istext(x): - raise TypeError("interpret returned non-string %r" % (x,)) - self.exprinfo = x - return self.exprinfo - def getfirstlinesource(self): # on Jython this firstlineno can be -1 apparently return max(self.frame.code.firstlineno, 0) @@ -220,16 +219,24 @@ def ishidden(self): """ return True if the current frame has a var __tracebackhide__ resolving to True + If __tracebackhide__ is a callable, it gets called with the + ExceptionInfo instance and can decide whether to hide the traceback. + mostly for internal use """ try: - return self.frame.f_locals['__tracebackhide__'] + tbh = self.frame.f_locals['__tracebackhide__'] except KeyError: try: - return self.frame.f_globals['__tracebackhide__'] + tbh = self.frame.f_globals['__tracebackhide__'] except KeyError: return False + if py.builtin.callable(tbh): + return tbh(None if self._excinfo is None else self._excinfo()) + else: + return tbh + def __str__(self): try: fn = str(self.path) @@ -253,12 +260,13 @@ class Traceback(list): access to Traceback entries. """ Entry = TracebackEntry - def __init__(self, tb): - """ initialize from given python traceback object. 
""" + def __init__(self, tb, excinfo=None): + """ initialize from given python traceback object and ExceptionInfo """ + self._excinfo = excinfo if hasattr(tb, 'tb_next'): def f(cur): while cur is not None: - yield self.Entry(cur) + yield self.Entry(cur, excinfo=excinfo) cur = cur.tb_next list.__init__(self, f(tb)) else: @@ -282,7 +290,7 @@ def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None): not codepath.relto(excludepath)) and (lineno is None or x.lineno == lineno) and (firstlineno is None or x.frame.code.firstlineno == firstlineno)): - return Traceback(x._rawentry) + return Traceback(x._rawentry, self._excinfo) return self def __getitem__(self, key): @@ -301,7 +309,7 @@ def filter(self, fn=lambda x: not x.ishidden()): by default this removes all the TracebackEntries which are hidden (see ishidden() above) """ - return Traceback(filter(fn, self)) + return Traceback(filter(fn, self), self._excinfo) def getcrashentry(self): """ return last non-hidden traceback entry that lead @@ -337,6 +345,7 @@ def recursionindex(self): l.append(entry.frame.f_locals) return None + co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2', '?', 'eval') @@ -345,6 +354,8 @@ class ExceptionInfo(object): help for navigating the traceback. 
""" _striptext = '' + _assert_start_repr = "AssertionError(u\'assert " if _PY2 else "AssertionError(\'assert " + def __init__(self, tup=None, exprinfo=None): import _pytest._code if tup is None: @@ -352,8 +363,8 @@ def __init__(self, tup=None, exprinfo=None): if exprinfo is None and isinstance(tup[1], AssertionError): exprinfo = getattr(tup[1], 'msg', None) if exprinfo is None: - exprinfo = str(tup[1]) - if exprinfo and exprinfo.startswith('assert '): + exprinfo = py.io.saferepr(tup[1]) + if exprinfo and exprinfo.startswith(self._assert_start_repr): self._striptext = 'AssertionError: ' self._excinfo = tup #: the exception class @@ -365,7 +376,7 @@ def __init__(self, tup=None, exprinfo=None): #: the exception type name self.typename = self.type.__name__ #: the exception traceback (_pytest._code.Traceback instance) - self.traceback = _pytest._code.Traceback(self.tb) + self.traceback = _pytest._code.Traceback(self.tb, excinfo=ref(self)) def __repr__(self): return "" % (self.typename, len(self.traceback)) @@ -427,6 +438,19 @@ def __unicode__(self): loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly()) return unicode(loc) + def match(self, regexp): + """ + Match the regular expression 'regexp' on the string representation of + the exception. If it matches then True is returned (so that it is + possible to write 'assert excinfo.match()'). If it doesn't match an + AssertionError is raised. + """ + __tracebackhide__ = True + if not re.search(regexp, str(self.value)): + assert 0, "Pattern '{0!s}' not found in '{1!s}'".format( + regexp, self.value) + return True + class FormattedExcinfo(object): """ presenting information about failing Functions and Generators. 
""" @@ -578,30 +602,91 @@ def repr_traceback(self, excinfo): traceback = excinfo.traceback if self.tbfilter: traceback = traceback.filter() - recursionindex = None + if is_recursion_error(excinfo): - recursionindex = traceback.recursionindex() + traceback, extraline = self._truncate_recursive_traceback(traceback) + else: + extraline = None + last = traceback[-1] entries = [] - extraline = None for index, entry in enumerate(traceback): einfo = (last == entry) and excinfo or None reprentry = self.repr_traceback_entry(entry, einfo) entries.append(reprentry) - if index == recursionindex: - extraline = "!!! Recursion detected (same locals & position)" - break return ReprTraceback(entries, extraline, style=self.style) + def _truncate_recursive_traceback(self, traceback): + """ + Truncate the given recursive traceback trying to find the starting point + of the recursion. + + The detection is done by going through each traceback entry and finding the + point in which the locals of the frame are equal to the locals of a previous frame (see ``recursionindex()``. + + Handle the situation where the recursion process might raise an exception (for example + comparing numpy arrays using equality raises a TypeError), in which case we do our best to + warn the user of the error and show a limited traceback. + """ + try: + recursionindex = traceback.recursionindex() + except Exception as e: + max_frames = 10 + extraline = ( + '!!! Recursion error detected, but an error occurred locating the origin of recursion.\n' + ' The following exception happened when comparing locals in the stack frame:\n' + ' {exc_type}: {exc_msg}\n' + ' Displaying first and last {max_frames} stack frames out of {total}.' + ).format(exc_type=type(e).__name__, exc_msg=safe_str(e), max_frames=max_frames, total=len(traceback)) + traceback = traceback[:max_frames] + traceback[-max_frames:] + else: + if recursionindex is not None: + extraline = "!!! 
Recursion detected (same locals & position)" + traceback = traceback[:recursionindex + 1] + else: + extraline = None + + return traceback, extraline + def repr_excinfo(self, excinfo): - reprtraceback = self.repr_traceback(excinfo) - reprcrash = excinfo._getreprcrash() - return ReprExceptionInfo(reprtraceback, reprcrash) + if _PY2: + reprtraceback = self.repr_traceback(excinfo) + reprcrash = excinfo._getreprcrash() -class TerminalRepr: + return ReprExceptionInfo(reprtraceback, reprcrash) + else: + repr_chain = [] + e = excinfo.value + descr = None + while e is not None: + if excinfo: + reprtraceback = self.repr_traceback(excinfo) + reprcrash = excinfo._getreprcrash() + else: + # fallback to native repr if the exception doesn't have a traceback: + # ExceptionInfo objects require a full traceback to work + reprtraceback = ReprTracebackNative(py.std.traceback.format_exception(type(e), e, None)) + reprcrash = None + + repr_chain += [(reprtraceback, reprcrash, descr)] + if e.__cause__ is not None: + e = e.__cause__ + excinfo = ExceptionInfo((type(e), e, e.__traceback__)) if e.__traceback__ else None + descr = 'The above exception was the direct cause of the following exception:' + elif e.__context__ is not None: + e = e.__context__ + excinfo = ExceptionInfo((type(e), e, e.__traceback__)) if e.__traceback__ else None + descr = 'During handling of the above exception, another exception occurred:' + else: + e = None + repr_chain.reverse() + return ExceptionChainRepr(repr_chain) + + +class TerminalRepr(object): def __str__(self): s = self.__unicode__() - if sys.version_info[0] < 3: + if _PY2: s = s.encode('utf-8') return s @@ -617,21 +702,47 @@ def __repr__(self): return "<%s instance at %0x>" %(self.__class__, id(self)) -class ReprExceptionInfo(TerminalRepr): - def __init__(self, reprtraceback, reprcrash): - self.reprtraceback = reprtraceback - self.reprcrash = reprcrash +class ExceptionRepr(TerminalRepr): + def __init__(self): self.sections = [] def addsection(self, name, 
content, sep="-"): self.sections.append((name, content, sep)) def toterminal(self, tw): - self.reprtraceback.toterminal(tw) for name, content, sep in self.sections: tw.sep(sep, name) tw.line(content) + +class ExceptionChainRepr(ExceptionRepr): + def __init__(self, chain): + super(ExceptionChainRepr, self).__init__() + self.chain = chain + # reprcrash and reprtraceback of the outermost (the newest) exception + # in the chain + self.reprtraceback = chain[-1][0] + self.reprcrash = chain[-1][1] + + def toterminal(self, tw): + for element in self.chain: + element[0].toterminal(tw) + if element[2] is not None: + tw.line("") + tw.line(element[2], yellow=True) + super(ExceptionChainRepr, self).toterminal(tw) + + +class ReprExceptionInfo(ExceptionRepr): + def __init__(self, reprtraceback, reprcrash): + super(ReprExceptionInfo, self).__init__() + self.reprtraceback = reprtraceback + self.reprcrash = reprcrash + + def toterminal(self, tw): + self.reprtraceback.toterminal(tw) + super(ReprExceptionInfo, self).toterminal(tw) + class ReprTraceback(TerminalRepr): entrysep = "_ " @@ -720,7 +831,8 @@ def toterminal(self, tw): i = msg.find("\n") if i != -1: msg = msg[:i] - tw.line("%s:%s: %s" %(self.path, self.lineno, msg)) + tw.write(self.path, bold=True, red=True) + tw.line(":%s: %s" % (self.lineno, msg)) class ReprLocals(TerminalRepr): def __init__(self, lines): @@ -753,29 +865,6 @@ def toterminal(self, tw): tw.line("") - -oldbuiltins = {} - -def patch_builtins(assertion=True, compile=True): - """ put compile and AssertionError builtins to Python's builtins. 
""" - if assertion: - from _pytest.assertion import reinterpret - l = oldbuiltins.setdefault('AssertionError', []) - l.append(py.builtin.builtins.AssertionError) - py.builtin.builtins.AssertionError = reinterpret.AssertionError - if compile: - import _pytest._code - l = oldbuiltins.setdefault('compile', []) - l.append(py.builtin.builtins.compile) - py.builtin.builtins.compile = _pytest._code.compile - -def unpatch_builtins(assertion=True, compile=True): - """ remove compile and AssertionError builtins from Python builtins. """ - if assertion: - py.builtin.builtins.AssertionError = oldbuiltins['AssertionError'].pop() - if compile: - py.builtin.builtins.compile = oldbuiltins['compile'].pop() - def getrawcode(obj, trycall=True): """ return code object for given function. """ try: @@ -792,7 +881,8 @@ def getrawcode(obj, trycall=True): return x return obj -if sys.version_info[:2] >= (3, 5): # RecursionError introduced in 3.5 + +if PY35: # RecursionError introduced in 3.5 def is_recursion_error(excinfo): return excinfo.errisinstance(RecursionError) # noqa else: diff --git a/third_party/python/pytest/_pytest/_code/source.py b/third_party/python/pytest/_pytest/_code/source.py index a1521f8a212a6..8e6148410a16f 100644 --- a/third_party/python/pytest/_pytest/_code/source.py +++ b/third_party/python/pytest/_pytest/_code/source.py @@ -1,10 +1,9 @@ -from __future__ import generators +from __future__ import absolute_import, division, generators, print_function from bisect import bisect_right import sys import inspect, tokenize import py -from types import ModuleType cpy_compile = compile try: @@ -52,22 +51,21 @@ def __eq__(self, other): return str(self) == other return False + __hash__ = None + def __getitem__(self, key): if isinstance(key, int): return self.lines[key] else: if key.step not in (None, 1): raise IndexError("cannot slice a Source with a step") - return self.__getslice__(key.start, key.stop) + newsource = Source() + newsource.lines = self.lines[key.start:key.stop] + 
return newsource def __len__(self): return len(self.lines) - def __getslice__(self, start, end): - newsource = Source() - newsource.lines = self.lines[start:end] - return newsource - def strip(self): """ return new source object with trailing and leading blank lines removed. @@ -193,14 +191,6 @@ def compile(self, filename=None, mode='exec', if flag & _AST_FLAG: return co lines = [(x + "\n") for x in self.lines] - if sys.version_info[0] >= 3: - # XXX py3's inspect.getsourcefile() checks for a module - # and a pep302 __loader__ ... we don't have a module - # at code compile-time so we need to fake it here - m = ModuleType("_pycodecompile_pseudo_module") - py.std.inspect.modulesbyfile[filename] = None - py.std.sys.modules[None] = m - m.__loader__ = 1 py.std.linecache.cache[filename] = (1, None, lines, filename) return co @@ -266,6 +256,7 @@ def findsource(obj): source.lines = [line.rstrip() for line in sourcelines] return source, lineno + def getsource(obj, **kwargs): import _pytest._code obj = _pytest._code.getrawcode(obj) @@ -276,6 +267,7 @@ def getsource(obj, **kwargs): assert isinstance(strsrc, str) return Source(strsrc, **kwargs) + def deindent(lines, offset=None): if offset is None: for line in lines: @@ -289,6 +281,7 @@ def deindent(lines, offset=None): if offset == 0: return list(lines) newlines = [] + def readline_generator(lines): for line in lines: yield line + '\n' diff --git a/third_party/python/pytest/_pytest/_pluggy.py b/third_party/python/pytest/_pytest/_pluggy.py index 87d32cf8dd12a..6cc1d3d54a657 100644 --- a/third_party/python/pytest/_pytest/_pluggy.py +++ b/third_party/python/pytest/_pytest/_pluggy.py @@ -2,7 +2,7 @@ imports symbols from vendored "pluggy" if available, otherwise falls back to importing "pluggy" from the default namespace. 
""" - +from __future__ import absolute_import, division, print_function try: from _pytest.vendored_packages.pluggy import * # noqa from _pytest.vendored_packages.pluggy import __version__ # noqa diff --git a/third_party/python/pytest/_pytest/_version.py b/third_party/python/pytest/_pytest/_version.py new file mode 100644 index 0000000000000..e03109e669284 --- /dev/null +++ b/third_party/python/pytest/_pytest/_version.py @@ -0,0 +1,4 @@ +# coding: utf-8 +# file generated by setuptools_scm +# don't change, don't track in version control +version = '3.1.3' diff --git a/third_party/python/pytest/_pytest/assertion/__init__.py b/third_party/python/pytest/_pytest/assertion/__init__.py index 6921deb2a6068..acb034d8649af 100644 --- a/third_party/python/pytest/_pytest/assertion/__init__.py +++ b/third_party/python/pytest/_pytest/assertion/__init__.py @@ -1,11 +1,13 @@ """ support for presenting detailed information in failing assertions. """ +from __future__ import absolute_import, division, print_function import py -import os import sys -from _pytest.monkeypatch import monkeypatch + from _pytest.assertion import util +from _pytest.assertion import rewrite +from _pytest.assertion import truncate def pytest_addoption(parser): @@ -13,25 +15,46 @@ def pytest_addoption(parser): group.addoption('--assert', action="store", dest="assertmode", - choices=("rewrite", "reinterp", "plain",), + choices=("rewrite", "plain",), default="rewrite", metavar="MODE", - help="""control assertion debugging tools. 'plain' - performs no assertion debugging. 'reinterp' - reinterprets assert statements after they failed - to provide assertion expression information. - 'rewrite' (the default) rewrites assert - statements in test modules on import to - provide assert expression information. 
""") - group.addoption('--no-assert', - action="store_true", - default=False, - dest="noassert", - help="DEPRECATED equivalent to --assert=plain") - group.addoption('--nomagic', '--no-magic', - action="store_true", - default=False, - help="DEPRECATED equivalent to --assert=plain") + help="""Control assertion debugging tools. 'plain' + performs no assertion debugging. 'rewrite' + (the default) rewrites assert statements in + test modules on import to provide assert + expression information.""") + + + +def register_assert_rewrite(*names): + """Register one or more module names to be rewritten on import. + + This function will make sure that this module or all modules inside + the package will get their assert statements rewritten. + Thus you should make sure to call this before the module is + actually imported, usually in your __init__.py if you are a plugin + using a package. + + :raise TypeError: if the given module names are not strings. + """ + for name in names: + if not isinstance(name, str): + msg = 'expected module names as *args, got {0} instead' + raise TypeError(msg.format(repr(names))) + for hook in sys.meta_path: + if isinstance(hook, rewrite.AssertionRewritingHook): + importhook = hook + break + else: + importhook = DummyRewriteHook() + importhook.mark_rewrite(*names) + + +class DummyRewriteHook(object): + """A no-op import hook for when rewriting is disabled.""" + + def mark_rewrite(self, *names): + pass class AssertionState: @@ -40,57 +63,39 @@ class AssertionState: def __init__(self, config, mode): self.mode = mode self.trace = config.trace.root.get("assertion") + self.hook = None -def pytest_configure(config): - mode = config.getvalue("assertmode") - if config.getvalue("noassert") or config.getvalue("nomagic"): - mode = "plain" - if mode == "rewrite": - try: - import ast # noqa - except ImportError: - mode = "reinterp" - else: - # Both Jython and CPython 2.6.0 have AST bugs that make the - # assertion rewriting hook malfunction. 
- if (sys.platform.startswith('java') or - sys.version_info[:3] == (2, 6, 0)): - mode = "reinterp" - if mode != "plain": - _load_modules(mode) - m = monkeypatch() - config._cleanup.append(m.undo) - m.setattr(py.builtin.builtins, 'AssertionError', - reinterpret.AssertionError) # noqa - hook = None - if mode == "rewrite": - hook = rewrite.AssertionRewritingHook() # noqa - sys.meta_path.insert(0, hook) - warn_about_missing_assertion(mode) - config._assertstate = AssertionState(config, mode) - config._assertstate.hook = hook - config._assertstate.trace("configured with mode set to %r" % (mode,)) +def install_importhook(config): + """Try to install the rewrite hook, raise SystemError if it fails.""" + # Both Jython and CPython 2.6.0 have AST bugs that make the + # assertion rewriting hook malfunction. + if (sys.platform.startswith('java') or + sys.version_info[:3] == (2, 6, 0)): + raise SystemError('rewrite not supported') + + config._assertstate = AssertionState(config, 'rewrite') + config._assertstate.hook = hook = rewrite.AssertionRewritingHook(config) + sys.meta_path.insert(0, hook) + config._assertstate.trace('installed rewrite import hook') + def undo(): hook = config._assertstate.hook if hook is not None and hook in sys.meta_path: sys.meta_path.remove(hook) + config.add_cleanup(undo) + return hook def pytest_collection(session): # this hook is only called when test modules are collected # so for example not in the master process of pytest-xdist # (which does not collect test modules) - hook = session.config._assertstate.hook - if hook is not None: - hook.set_session(session) - - -def _running_on_ci(): - """Check if we're currently running on a CI system.""" - env_vars = ['CI', 'BUILD_NUMBER'] - return any(var in os.environ for var in env_vars) + assertstate = getattr(session.config, '_assertstate', None) + if assertstate: + if assertstate.hook is not None: + assertstate.hook.set_session(session) def pytest_runtest_setup(item): @@ -106,8 +111,8 @@ def 
callbinrepr(op, left, right): This uses the first result from the hook and then ensures the following: - * Overly verbose explanations are dropped unless -vv was used or - running on a CI. + * Overly verbose explanations are truncated unless configured otherwise + (eg. if running in verbose mode). * Embedded newlines are escaped to help util.format_explanation() later. * If the rewrite mode is used embedded %-characters are replaced @@ -120,14 +125,7 @@ def callbinrepr(op, left, right): config=item.config, op=op, left=left, right=right) for new_expl in hook_result: if new_expl: - if (sum(len(p) for p in new_expl[1:]) > 80*8 and - item.config.option.verbose < 2 and - not _running_on_ci()): - show_max = 10 - truncated_lines = len(new_expl) - show_max - new_expl[show_max:] = [py.builtin._totext( - 'Detailed information truncated (%d more lines)' - ', use "-vv" to show' % truncated_lines)] + new_expl = truncate.truncate_if_required(new_expl, item) new_expl = [line.replace("\n", "\\n") for line in new_expl] res = py.builtin._totext("\n~").join(new_expl) if item.config.getvalue("assertmode") == "rewrite": @@ -141,35 +139,10 @@ def pytest_runtest_teardown(item): def pytest_sessionfinish(session): - hook = session.config._assertstate.hook - if hook is not None: - hook.session = None - - -def _load_modules(mode): - """Lazily import assertion related code.""" - global rewrite, reinterpret - from _pytest.assertion import reinterpret # noqa - if mode == "rewrite": - from _pytest.assertion import rewrite # noqa - - -def warn_about_missing_assertion(mode): - try: - assert False - except AssertionError: - pass - else: - if mode == "rewrite": - specifically = ("assertions which are not in test modules " - "will be ignored") - else: - specifically = "failing tests may report as passing" - - sys.stderr.write("WARNING: " + specifically + - " because assert statements are not executed " - "by the underlying Python interpreter " - "(are you using python -O?)\n") + assertstate = 
getattr(session.config, '_assertstate', None) + if assertstate: + if assertstate.hook is not None: + assertstate.hook.set_session(None) # Expose this plugin's implementation for the pytest_assertrepr_compare hook diff --git a/third_party/python/pytest/_pytest/assertion/reinterpret.py b/third_party/python/pytest/_pytest/assertion/reinterpret.py deleted file mode 100644 index f4262c3aced2d..0000000000000 --- a/third_party/python/pytest/_pytest/assertion/reinterpret.py +++ /dev/null @@ -1,407 +0,0 @@ -""" -Find intermediate evalutation results in assert statements through builtin AST. -""" -import ast -import sys - -import _pytest._code -import py -from _pytest.assertion import util -u = py.builtin._totext - - -class AssertionError(util.BuiltinAssertionError): - def __init__(self, *args): - util.BuiltinAssertionError.__init__(self, *args) - if args: - # on Python2.6 we get len(args)==2 for: assert 0, (x,y) - # on Python2.7 and above we always get len(args) == 1 - # with args[0] being the (x,y) tuple. - if len(args) > 1: - toprint = args - else: - toprint = args[0] - try: - self.msg = u(toprint) - except Exception: - self.msg = u( - "<[broken __repr__] %s at %0xd>" - % (toprint.__class__, id(toprint))) - else: - f = _pytest._code.Frame(sys._getframe(1)) - try: - source = f.code.fullsource - if source is not None: - try: - source = source.getstatement(f.lineno, assertion=True) - except IndexError: - source = None - else: - source = str(source.deindent()).strip() - except py.error.ENOENT: - source = None - # this can also occur during reinterpretation, when the - # co_filename is set to "". 
- if source: - self.msg = reinterpret(source, f, should_fail=True) - else: - self.msg = "" - if not self.args: - self.args = (self.msg,) - -if sys.version_info > (3, 0): - AssertionError.__module__ = "builtins" - -if sys.platform.startswith("java"): - # See http://bugs.jython.org/issue1497 - _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict", - "ListComp", "GeneratorExp", "Yield", "Compare", "Call", - "Repr", "Num", "Str", "Attribute", "Subscript", "Name", - "List", "Tuple") - _stmts = ("FunctionDef", "ClassDef", "Return", "Delete", "Assign", - "AugAssign", "Print", "For", "While", "If", "With", "Raise", - "TryExcept", "TryFinally", "Assert", "Import", "ImportFrom", - "Exec", "Global", "Expr", "Pass", "Break", "Continue") - _expr_nodes = set(getattr(ast, name) for name in _exprs) - _stmt_nodes = set(getattr(ast, name) for name in _stmts) - def _is_ast_expr(node): - return node.__class__ in _expr_nodes - def _is_ast_stmt(node): - return node.__class__ in _stmt_nodes -else: - def _is_ast_expr(node): - return isinstance(node, ast.expr) - def _is_ast_stmt(node): - return isinstance(node, ast.stmt) - -try: - _Starred = ast.Starred -except AttributeError: - # Python 2. Define a dummy class so isinstance() will always be False. - class _Starred(object): pass - - -class Failure(Exception): - """Error found while interpreting AST.""" - - def __init__(self, explanation=""): - self.cause = sys.exc_info() - self.explanation = explanation - - -def reinterpret(source, frame, should_fail=False): - mod = ast.parse(source) - visitor = DebugInterpreter(frame) - try: - visitor.visit(mod) - except Failure: - failure = sys.exc_info()[1] - return getfailure(failure) - if should_fail: - return ("(assertion failed, but when it was re-run for " - "printing intermediate values, it did not fail. 
Suggestions: " - "compute assert expression before the assert or use --assert=plain)") - -def run(offending_line, frame=None): - if frame is None: - frame = _pytest._code.Frame(sys._getframe(1)) - return reinterpret(offending_line, frame) - -def getfailure(e): - explanation = util.format_explanation(e.explanation) - value = e.cause[1] - if str(value): - lines = explanation.split('\n') - lines[0] += " << %s" % (value,) - explanation = '\n'.join(lines) - text = "%s: %s" % (e.cause[0].__name__, explanation) - if text.startswith('AssertionError: assert '): - text = text[16:] - return text - -operator_map = { - ast.BitOr : "|", - ast.BitXor : "^", - ast.BitAnd : "&", - ast.LShift : "<<", - ast.RShift : ">>", - ast.Add : "+", - ast.Sub : "-", - ast.Mult : "*", - ast.Div : "/", - ast.FloorDiv : "//", - ast.Mod : "%", - ast.Eq : "==", - ast.NotEq : "!=", - ast.Lt : "<", - ast.LtE : "<=", - ast.Gt : ">", - ast.GtE : ">=", - ast.Pow : "**", - ast.Is : "is", - ast.IsNot : "is not", - ast.In : "in", - ast.NotIn : "not in" -} - -unary_map = { - ast.Not : "not %s", - ast.Invert : "~%s", - ast.USub : "-%s", - ast.UAdd : "+%s" -} - - -class DebugInterpreter(ast.NodeVisitor): - """Interpret AST nodes to gleam useful debugging information. """ - - def __init__(self, frame): - self.frame = frame - - def generic_visit(self, node): - # Fallback when we don't have a special implementation. 
- if _is_ast_expr(node): - mod = ast.Expression(node) - co = self._compile(mod) - try: - result = self.frame.eval(co) - except Exception: - raise Failure() - explanation = self.frame.repr(result) - return explanation, result - elif _is_ast_stmt(node): - mod = ast.Module([node]) - co = self._compile(mod, "exec") - try: - self.frame.exec_(co) - except Exception: - raise Failure() - return None, None - else: - raise AssertionError("can't handle %s" %(node,)) - - def _compile(self, source, mode="eval"): - return compile(source, "", mode) - - def visit_Expr(self, expr): - return self.visit(expr.value) - - def visit_Module(self, mod): - for stmt in mod.body: - self.visit(stmt) - - def visit_Name(self, name): - explanation, result = self.generic_visit(name) - # See if the name is local. - source = "%r in locals() is not globals()" % (name.id,) - co = self._compile(source) - try: - local = self.frame.eval(co) - except Exception: - # have to assume it isn't - local = None - if local is None or not self.frame.is_true(local): - return name.id, result - return explanation, result - - def visit_Compare(self, comp): - left = comp.left - left_explanation, left_result = self.visit(left) - for op, next_op in zip(comp.ops, comp.comparators): - next_explanation, next_result = self.visit(next_op) - op_symbol = operator_map[op.__class__] - explanation = "%s %s %s" % (left_explanation, op_symbol, - next_explanation) - source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,) - co = self._compile(source) - try: - result = self.frame.eval(co, __exprinfo_left=left_result, - __exprinfo_right=next_result) - except Exception: - raise Failure(explanation) - try: - if not self.frame.is_true(result): - break - except KeyboardInterrupt: - raise - except: - break - left_explanation, left_result = next_explanation, next_result - - if util._reprcompare is not None: - res = util._reprcompare(op_symbol, left_result, next_result) - if res: - explanation = res - return explanation, result - - def 
visit_BoolOp(self, boolop): - is_or = isinstance(boolop.op, ast.Or) - explanations = [] - for operand in boolop.values: - explanation, result = self.visit(operand) - explanations.append(explanation) - if result == is_or: - break - name = is_or and " or " or " and " - explanation = "(" + name.join(explanations) + ")" - return explanation, result - - def visit_UnaryOp(self, unary): - pattern = unary_map[unary.op.__class__] - operand_explanation, operand_result = self.visit(unary.operand) - explanation = pattern % (operand_explanation,) - co = self._compile(pattern % ("__exprinfo_expr",)) - try: - result = self.frame.eval(co, __exprinfo_expr=operand_result) - except Exception: - raise Failure(explanation) - return explanation, result - - def visit_BinOp(self, binop): - left_explanation, left_result = self.visit(binop.left) - right_explanation, right_result = self.visit(binop.right) - symbol = operator_map[binop.op.__class__] - explanation = "(%s %s %s)" % (left_explanation, symbol, - right_explanation) - source = "__exprinfo_left %s __exprinfo_right" % (symbol,) - co = self._compile(source) - try: - result = self.frame.eval(co, __exprinfo_left=left_result, - __exprinfo_right=right_result) - except Exception: - raise Failure(explanation) - return explanation, result - - def visit_Call(self, call): - func_explanation, func = self.visit(call.func) - arg_explanations = [] - ns = {"__exprinfo_func" : func} - arguments = [] - for arg in call.args: - arg_explanation, arg_result = self.visit(arg) - if isinstance(arg, _Starred): - arg_name = "__exprinfo_star" - ns[arg_name] = arg_result - arguments.append("*%s" % (arg_name,)) - arg_explanations.append("*%s" % (arg_explanation,)) - else: - arg_name = "__exprinfo_%s" % (len(ns),) - ns[arg_name] = arg_result - arguments.append(arg_name) - arg_explanations.append(arg_explanation) - for keyword in call.keywords: - arg_explanation, arg_result = self.visit(keyword.value) - if keyword.arg: - arg_name = "__exprinfo_%s" % (len(ns),) - 
keyword_source = "%s=%%s" % (keyword.arg) - arguments.append(keyword_source % (arg_name,)) - arg_explanations.append(keyword_source % (arg_explanation,)) - else: - arg_name = "__exprinfo_kwds" - arguments.append("**%s" % (arg_name,)) - arg_explanations.append("**%s" % (arg_explanation,)) - - ns[arg_name] = arg_result - - if getattr(call, 'starargs', None): - arg_explanation, arg_result = self.visit(call.starargs) - arg_name = "__exprinfo_star" - ns[arg_name] = arg_result - arguments.append("*%s" % (arg_name,)) - arg_explanations.append("*%s" % (arg_explanation,)) - - if getattr(call, 'kwargs', None): - arg_explanation, arg_result = self.visit(call.kwargs) - arg_name = "__exprinfo_kwds" - ns[arg_name] = arg_result - arguments.append("**%s" % (arg_name,)) - arg_explanations.append("**%s" % (arg_explanation,)) - args_explained = ", ".join(arg_explanations) - explanation = "%s(%s)" % (func_explanation, args_explained) - args = ", ".join(arguments) - source = "__exprinfo_func(%s)" % (args,) - co = self._compile(source) - try: - result = self.frame.eval(co, **ns) - except Exception: - raise Failure(explanation) - pattern = "%s\n{%s = %s\n}" - rep = self.frame.repr(result) - explanation = pattern % (rep, rep, explanation) - return explanation, result - - def _is_builtin_name(self, name): - pattern = "%r not in globals() and %r not in locals()" - source = pattern % (name.id, name.id) - co = self._compile(source) - try: - return self.frame.eval(co) - except Exception: - return False - - def visit_Attribute(self, attr): - if not isinstance(attr.ctx, ast.Load): - return self.generic_visit(attr) - source_explanation, source_result = self.visit(attr.value) - explanation = "%s.%s" % (source_explanation, attr.attr) - source = "__exprinfo_expr.%s" % (attr.attr,) - co = self._compile(source) - try: - try: - result = self.frame.eval(co, __exprinfo_expr=source_result) - except AttributeError: - # Maybe the attribute name needs to be mangled? 
- if not attr.attr.startswith("__") or attr.attr.endswith("__"): - raise - source = "getattr(__exprinfo_expr.__class__, '__name__', '')" - co = self._compile(source) - class_name = self.frame.eval(co, __exprinfo_expr=source_result) - mangled_attr = "_" + class_name + attr.attr - source = "__exprinfo_expr.%s" % (mangled_attr,) - co = self._compile(source) - result = self.frame.eval(co, __exprinfo_expr=source_result) - except Exception: - raise Failure(explanation) - explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result), - self.frame.repr(result), - source_explanation, attr.attr) - # Check if the attr is from an instance. - source = "%r in getattr(__exprinfo_expr, '__dict__', {})" - source = source % (attr.attr,) - co = self._compile(source) - try: - from_instance = self.frame.eval(co, __exprinfo_expr=source_result) - except Exception: - from_instance = None - if from_instance is None or self.frame.is_true(from_instance): - rep = self.frame.repr(result) - pattern = "%s\n{%s = %s\n}" - explanation = pattern % (rep, rep, explanation) - return explanation, result - - def visit_Assert(self, assrt): - test_explanation, test_result = self.visit(assrt.test) - explanation = "assert %s" % (test_explanation,) - if not self.frame.is_true(test_result): - try: - raise util.BuiltinAssertionError - except Exception: - raise Failure(explanation) - return explanation, test_result - - def visit_Assign(self, assign): - value_explanation, value_result = self.visit(assign.value) - explanation = "... 
= %s" % (value_explanation,) - name = ast.Name("__exprinfo_expr", ast.Load(), - lineno=assign.value.lineno, - col_offset=assign.value.col_offset) - new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno, - col_offset=assign.col_offset) - mod = ast.Module([new_assign]) - co = self._compile(mod, "exec") - try: - self.frame.exec_(co, __exprinfo_expr=value_result) - except Exception: - raise Failure(explanation) - return explanation, value_result - diff --git a/third_party/python/pytest/_pytest/assertion/rewrite.py b/third_party/python/pytest/_pytest/assertion/rewrite.py index 14b8e49db2b92..6ec54d7e75a7a 100644 --- a/third_party/python/pytest/_pytest/assertion/rewrite.py +++ b/third_party/python/pytest/_pytest/assertion/rewrite.py @@ -1,6 +1,7 @@ """Rewrite assertion AST to produce nice error messages""" - +from __future__ import absolute_import, division, print_function import ast +import _ast import errno import itertools import imp @@ -44,20 +45,20 @@ class AssertionRewritingHook(object): """PEP302 Import hook which rewrites asserts.""" - def __init__(self): + def __init__(self, config): + self.config = config + self.fnpats = config.getini("python_files") self.session = None self.modules = {} + self._rewritten_names = set() self._register_with_pkg_resources() + self._must_rewrite = set() def set_session(self, session): - self.fnpats = session.config.getini("python_files") self.session = session def find_module(self, name, path=None): - if self.session is None: - return None - sess = self.session - state = sess.config._assertstate + state = self.config._assertstate state.trace("find_module called for: %s" % name) names = name.rsplit(".", 1) lastname = names[-1] @@ -78,7 +79,12 @@ def find_module(self, name, path=None): tp = desc[2] if tp == imp.PY_COMPILED: if hasattr(imp, "source_from_cache"): - fn = imp.source_from_cache(fn) + try: + fn = imp.source_from_cache(fn) + except ValueError: + # Python 3 doesn't like orphaned but still-importable + # .pyc 
files. + fn = fn[:-1] else: fn = fn[:-1] elif tp != imp.PY_SOURCE: @@ -86,24 +92,13 @@ def find_module(self, name, path=None): return None else: fn = os.path.join(pth, name.rpartition(".")[2] + ".py") + fn_pypath = py.path.local(fn) - # Is this a test file? - if not sess.isinitpath(fn): - # We have to be very careful here because imports in this code can - # trigger a cycle. - self.session = None - try: - for pat in self.fnpats: - if fn_pypath.fnmatch(pat): - state.trace("matched test file %r" % (fn,)) - break - else: - return None - finally: - self.session = sess - else: - state.trace("matched test file (was specified on cmdline): %r" % - (fn,)) + if not self._should_rewrite(name, fn_pypath, state): + return None + + self._rewritten_names.add(name) + # The requested module looks like a test file, so rewrite it. This is # the most magical part of the process: load the source, rewrite the # asserts, and load the rewritten source. We also cache the rewritten @@ -140,7 +135,7 @@ def find_module(self, name, path=None): co = _read_pyc(fn_pypath, pyc, state.trace) if co is None: state.trace("rewriting %r" % (fn,)) - source_stat, co = _rewrite_test(state, fn_pypath) + source_stat, co = _rewrite_test(self.config, fn_pypath) if co is None: # Probably a SyntaxError in the test. 
return None @@ -151,6 +146,51 @@ def find_module(self, name, path=None): self.modules[name] = co, pyc return self + def _should_rewrite(self, name, fn_pypath, state): + # always rewrite conftest files + fn = str(fn_pypath) + if fn_pypath.basename == 'conftest.py': + state.trace("rewriting conftest file: %r" % (fn,)) + return True + + if self.session is not None: + if self.session.isinitpath(fn): + state.trace("matched test file (was specified on cmdline): %r" % + (fn,)) + return True + + # modules not passed explicitly on the command line are only + # rewritten if they match the naming convention for test files + for pat in self.fnpats: + if fn_pypath.fnmatch(pat): + state.trace("matched test file %r" % (fn,)) + return True + + for marked in self._must_rewrite: + if name.startswith(marked): + state.trace("matched marked file %r (from %r)" % (name, marked)) + return True + + return False + + def mark_rewrite(self, *names): + """Mark import names as needing to be re-written. + + The named module or package as well as any nested modules will + be re-written on import. + """ + already_imported = set(names).intersection(set(sys.modules)) + if already_imported: + for name in already_imported: + if name not in self._rewritten_names: + self._warn_already_imported(name) + self._must_rewrite.update(names) + + def _warn_already_imported(self, name): + self.config.warn( + 'P1', + 'Module already imported so can not be re-written: %s' % name) + def load_module(self, name): # If there is an existing module object named 'fullname' in # sys.modules, the loader must use that existing module. 
(Otherwise, @@ -170,7 +210,8 @@ def load_module(self, name): mod.__loader__ = self py.builtin.exec_(co, mod.__dict__) except: - del sys.modules[name] + if name in sys.modules: + del sys.modules[name] raise return sys.modules[name] @@ -235,14 +276,16 @@ def _write_pyc(state, co, source_stat, pyc): fp.close() return True + RN = "\r\n".encode("utf-8") N = "\n".encode("utf-8") cookie_re = re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*[-\w.]+") BOM_UTF8 = '\xef\xbb\xbf' -def _rewrite_test(state, fn): +def _rewrite_test(config, fn): """Try to read and rewrite *fn* and return the code object.""" + state = config._assertstate try: stat = fn.stat() source = fn.read("rb") @@ -287,9 +330,9 @@ def _rewrite_test(state, fn): # Let this pop up again in the real import. state.trace("failed to parse: %r" % (fn,)) return None, None - rewrite_asserts(tree) + rewrite_asserts(tree, fn, config) try: - co = compile(tree, fn.strpath, "exec") + co = compile(tree, fn.strpath, "exec", dont_inherit=True) except SyntaxError: # It's possible that this error is from some bug in the # assertion rewriting, but I don't know of a fast way to tell. @@ -343,9 +386,9 @@ def _read_pyc(source, pyc, trace=lambda x: None): return co -def rewrite_asserts(mod): +def rewrite_asserts(mod, module_path=None, config=None): """Rewrite the assert statements in mod.""" - AssertionRewriter().run(mod) + AssertionRewriter(module_path, config).run(mod) def _saferepr(obj): @@ -532,6 +575,11 @@ class AssertionRewriter(ast.NodeVisitor): """ + def __init__(self, module_path, config): + super(AssertionRewriter, self).__init__() + self.module_path = module_path + self.config = config + def run(self, mod): """Find all assert statements in *mod* and rewrite them.""" if not mod.body: @@ -672,6 +720,10 @@ def visit_Assert(self, assert_): the expression is false. 
""" + if isinstance(assert_.test, ast.Tuple) and self.config is not None: + fslocation = (self.module_path, assert_.lineno) + self.config.warn('R1', 'assertion is always true, perhaps ' + 'remove parentheses?', fslocation=fslocation) self.statements = [] self.variables = [] self.variable_counter = itertools.count() @@ -855,6 +907,8 @@ def visit_Attribute(self, attr): def visit_Compare(self, comp): self.push_format_context() left_res, left_expl = self.visit(comp.left) + if isinstance(comp.left, (_ast.Compare, _ast.BoolOp)): + left_expl = "({0})".format(left_expl) res_variables = [self.variable() for i in range(len(comp.ops))] load_names = [ast.Name(v, ast.Load()) for v in res_variables] store_names = [ast.Name(v, ast.Store()) for v in res_variables] @@ -864,6 +918,8 @@ def visit_Compare(self, comp): results = [left_res] for i, op, next_operand in it: next_res, next_expl = self.visit(next_operand) + if isinstance(next_operand, (_ast.Compare, _ast.BoolOp)): + next_expl = "({0})".format(next_expl) results.append(next_res) sym = binop_map[op.__class__] syms.append(ast.Str(sym)) diff --git a/third_party/python/pytest/_pytest/assertion/truncate.py b/third_party/python/pytest/_pytest/assertion/truncate.py new file mode 100644 index 0000000000000..1e1306356938a --- /dev/null +++ b/third_party/python/pytest/_pytest/assertion/truncate.py @@ -0,0 +1,102 @@ +""" +Utilities for truncating assertion output. + +Current default behaviour is to truncate assertion explanations at +~8 terminal lines, unless running in "-vv" mode or running on CI. +""" +from __future__ import absolute_import, division, print_function +import os + +import py + + +DEFAULT_MAX_LINES = 8 +DEFAULT_MAX_CHARS = 8 * 80 +USAGE_MSG = "use '-vv' to show" + + +def truncate_if_required(explanation, item, max_length=None): + """ + Truncate this assertion explanation if the given test item is eligible. 
+ """ + if _should_truncate_item(item): + return _truncate_explanation(explanation) + return explanation + + +def _should_truncate_item(item): + """ + Whether or not this test item is eligible for truncation. + """ + verbose = item.config.option.verbose + return verbose < 2 and not _running_on_ci() + + +def _running_on_ci(): + """Check if we're currently running on a CI system.""" + env_vars = ['CI', 'BUILD_NUMBER'] + return any(var in os.environ for var in env_vars) + + +def _truncate_explanation(input_lines, max_lines=None, max_chars=None): + """ + Truncate given list of strings that makes up the assertion explanation. + + Truncates to either 8 lines, or 640 characters - whichever the input reaches + first. The remaining lines will be replaced by a usage message. + """ + + if max_lines is None: + max_lines = DEFAULT_MAX_LINES + if max_chars is None: + max_chars = DEFAULT_MAX_CHARS + + # Check if truncation required + input_char_count = len("".join(input_lines)) + if len(input_lines) <= max_lines and input_char_count <= max_chars: + return input_lines + + # Truncate first to max_lines, and then truncate to max_chars if max_chars + # is exceeded. + truncated_explanation = input_lines[:max_lines] + truncated_explanation = _truncate_by_char_count(truncated_explanation, max_chars) + + # Add ellipsis to final line + truncated_explanation[-1] = truncated_explanation[-1] + "..." 
+ + # Append useful message to explanation + truncated_line_count = len(input_lines) - len(truncated_explanation) + truncated_line_count += 1 # Account for the part-truncated final line + msg = '...Full output truncated' + if truncated_line_count == 1: + msg += ' ({0} line hidden)'.format(truncated_line_count) + else: + msg += ' ({0} lines hidden)'.format(truncated_line_count) + msg += ", {0}" .format(USAGE_MSG) + truncated_explanation.extend([ + py.builtin._totext(""), + py.builtin._totext(msg), + ]) + return truncated_explanation + + +def _truncate_by_char_count(input_lines, max_chars): + # Check if truncation required + if len("".join(input_lines)) <= max_chars: + return input_lines + + # Find point at which input length exceeds total allowed length + iterated_char_count = 0 + for iterated_index, input_line in enumerate(input_lines): + if iterated_char_count + len(input_line) > max_chars: + break + iterated_char_count += len(input_line) + + # Create truncated explanation with modified final line + truncated_result = input_lines[:iterated_index] + final_line = input_lines[iterated_index] + if final_line: + final_line_truncate_point = max_chars - iterated_char_count + final_line = final_line[:final_line_truncate_point] + truncated_result.append(final_line) + return truncated_result diff --git a/third_party/python/pytest/_pytest/assertion/util.py b/third_party/python/pytest/_pytest/assertion/util.py index f2f23efea2755..06eda8d915d69 100644 --- a/third_party/python/pytest/_pytest/assertion/util.py +++ b/third_party/python/pytest/_pytest/assertion/util.py @@ -1,4 +1,5 @@ """Utilities for assertion debugging""" +from __future__ import absolute_import, division, print_function import pprint import _pytest._code @@ -8,7 +9,7 @@ except ImportError: Sequence = list -BuiltinAssertionError = py.builtin.builtins.AssertionError + u = py.builtin._totext # The _reprcompare attribute on the util module is used by the new assertion @@ -38,44 +39,11 @@ def 
format_explanation(explanation): displaying diffs. """ explanation = ecu(explanation) - explanation = _collapse_false(explanation) lines = _split_explanation(explanation) result = _format_lines(lines) return u('\n').join(result) -def _collapse_false(explanation): - """Collapse expansions of False - - So this strips out any "assert False\n{where False = ...\n}" - blocks. - """ - where = 0 - while True: - start = where = explanation.find("False\n{False = ", where) - if where == -1: - break - level = 0 - prev_c = explanation[start] - for i, c in enumerate(explanation[start:]): - if prev_c + c == "\n{": - level += 1 - elif prev_c + c == "\n}": - level -= 1 - if not level: - break - prev_c = c - else: - raise AssertionError("unbalanced braces: %r" % (explanation,)) - end = start + i - where = end - if explanation[end - 1] == '\n': - explanation = (explanation[:start] + explanation[start+15:end-1] + - explanation[end+1:]) - where -= 17 - return explanation - - def _split_explanation(explanation): """Return a list of individual lines in the explanation @@ -138,7 +106,7 @@ def _format_lines(lines): def assertrepr_compare(config, op, left, right): """Return specialised explanations for some operators/operands""" width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op - left_repr = py.io.saferepr(left, maxsize=int(width/2)) + left_repr = py.io.saferepr(left, maxsize=int(width//2)) right_repr = py.io.saferepr(right, maxsize=width-len(left_repr)) summary = u('%s %s %s') % (ecu(left_repr), op, ecu(right_repr)) @@ -225,9 +193,10 @@ def _diff_text(left, right, verbose=False): 'characters in diff, use -v to show') % i] left = left[:-i] right = right[:-i] + keepends = True explanation += [line.strip('\n') - for line in ndiff(left.splitlines(), - right.splitlines())] + for line in ndiff(left.splitlines(keepends), + right.splitlines(keepends))] return explanation @@ -288,8 +257,8 @@ def _compare_eq_dict(left, right, verbose=False): explanation = [] common = 
set(left).intersection(set(right)) same = dict((k, left[k]) for k in common if left[k] == right[k]) - if same and not verbose: - explanation += [u('Omitting %s identical items, use -v to show') % + if same and verbose < 2: + explanation += [u('Omitting %s identical items, use -vv to show') % len(same)] elif same: explanation += [u('Common items:')] diff --git a/third_party/python/pytest/_pytest/cacheprovider.py b/third_party/python/pytest/_pytest/cacheprovider.py old mode 100755 new mode 100644 index 0657001f2d4db..7fc08fff36803 --- a/third_party/python/pytest/_pytest/cacheprovider.py +++ b/third_party/python/pytest/_pytest/cacheprovider.py @@ -1,10 +1,10 @@ """ merged implementation of the cache provider -the name cache was not choosen to ensure pluggy automatically +the name cache was not chosen to ensure pluggy automatically ignores the external pytest-cache """ - +from __future__ import absolute_import, division, print_function import py import pytest import json @@ -139,11 +139,11 @@ def pytest_collection_modifyitems(self, session, config, items): # running a subset of all tests with recorded failures outside # of the set of tests currently executing pass - elif self.config.getvalue("failedfirst"): - items[:] = previously_failed + previously_passed - else: + elif self.config.getvalue("lf"): items[:] = previously_failed config.hook.pytest_deselected(items=previously_passed) + else: + items[:] = previously_failed + previously_passed def pytest_sessionfinish(self, session): config = self.config @@ -219,7 +219,7 @@ def cacheshow(config, session): basedir = config.cache._cachedir vdir = basedir.join("v") tw.sep("-", "cache values") - for valpath in vdir.visit(lambda x: x.isfile()): + for valpath in sorted(vdir.visit(lambda x: x.isfile())): key = valpath.relto(vdir).replace(valpath.sep, "/") val = config.cache.get(key, dummy) if val is dummy: @@ -235,7 +235,7 @@ def cacheshow(config, session): ddir = basedir.join("d") if ddir.isdir() and ddir.listdir(): tw.sep("-", 
"cache directories") - for p in basedir.join("d").visit(): + for p in sorted(basedir.join("d").visit()): #if p.check(dir=1): # print("%s/" % p.relto(basedir)) if p.isfile(): diff --git a/third_party/python/pytest/_pytest/capture.py b/third_party/python/pytest/_pytest/capture.py index 3895a714aa03f..3661f26919e21 100644 --- a/third_party/python/pytest/_pytest/capture.py +++ b/third_party/python/pytest/_pytest/capture.py @@ -2,16 +2,19 @@ per-test stdout/stderr capturing mechanism. """ -from __future__ import with_statement +from __future__ import absolute_import, division, print_function +import contextlib import sys import os +import io +from io import UnsupportedOperation from tempfile import TemporaryFile import py import pytest +from _pytest.compat import CaptureIO -from py.io import TextIO unicode = py.builtin.text patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'} @@ -31,8 +34,10 @@ def pytest_addoption(parser): @pytest.hookimpl(hookwrapper=True) def pytest_load_initial_conftests(early_config, parser, args): - _readline_workaround() ns = early_config.known_args_namespace + if ns.capture == "fd": + _py36_windowsconsoleio_workaround() + _readline_workaround() pluginmanager = early_config.pluginmanager capman = CaptureManager(ns.capture) pluginmanager.register(capman, "capturemanager") @@ -146,46 +151,48 @@ def pytest_keyboard_interrupt(self, excinfo): def pytest_internalerror(self, excinfo): self.reset_capturings() - def suspendcapture_item(self, item, when): - out, err = self.suspendcapture() + def suspendcapture_item(self, item, when, in_=False): + out, err = self.suspendcapture(in_=in_) item.add_report_section(when, "stdout", out) item.add_report_section(when, "stderr", err) + error_capsysfderror = "cannot use capsys and capfd at the same time" @pytest.fixture def capsys(request): - """enables capturing of writes to sys.stdout/sys.stderr and makes + """Enable capturing of writes to sys.stdout/sys.stderr and make captured output available via 
``capsys.readouterr()`` method calls which return a ``(out, err)`` tuple. """ - if "capfd" in request._funcargs: + if "capfd" in request.fixturenames: raise request.raiseerror(error_capsysfderror) - request.node._capfuncarg = c = CaptureFixture(SysCapture) + request.node._capfuncarg = c = CaptureFixture(SysCapture, request) return c @pytest.fixture def capfd(request): - """enables capturing of writes to file descriptors 1 and 2 and makes + """Enable capturing of writes to file descriptors 1 and 2 and make captured output available via ``capfd.readouterr()`` method calls which return a ``(out, err)`` tuple. """ - if "capsys" in request._funcargs: + if "capsys" in request.fixturenames: request.raiseerror(error_capsysfderror) if not hasattr(os, 'dup'): pytest.skip("capfd funcarg needs os.dup") - request.node._capfuncarg = c = CaptureFixture(FDCapture) + request.node._capfuncarg = c = CaptureFixture(FDCapture, request) return c class CaptureFixture: - def __init__(self, captureclass): + def __init__(self, captureclass, request): self.captureclass = captureclass + self.request = request def _start(self): self._capture = MultiCapture(out=True, err=True, in_=False, - Capture=self.captureclass) + Capture=self.captureclass) self._capture.start_capturing() def close(self): @@ -200,6 +207,15 @@ def readouterr(self): except AttributeError: return self._outerr + @contextlib.contextmanager + def disabled(self): + capmanager = self.request.config.pluginmanager.getplugin('capturemanager') + capmanager.suspendcapture_item(self.request.node, "call", in_=True) + try: + yield + finally: + capmanager.resumecapture() + def safe_text_dupfile(f, mode, default_encoding="UTF8"): """ return a open text file object that's a duplicate of f on the @@ -390,7 +406,7 @@ def __init__(self, fd, tmpfile=None): if name == "stdin": tmpfile = DontReadFromInput() else: - tmpfile = TextIO() + tmpfile = CaptureIO() self.tmpfile = tmpfile def start(self): @@ -436,7 +452,8 @@ def read(self, *args): __iter__ 
= read def fileno(self): - raise ValueError("redirected Stdin is pseudofile, has no fileno()") + raise UnsupportedOperation("redirected stdin is pseudofile, " + "has no fileno()") def isatty(self): return False @@ -444,6 +461,13 @@ def isatty(self): def close(self): pass + @property + def buffer(self): + if sys.version_info >= (3,0): + return self + else: + raise AttributeError('redirected stdin has no attribute buffer') + def _readline_workaround(): """ @@ -452,7 +476,7 @@ def _readline_workaround(): Pdb uses readline support where available--when not running from the Python prompt, the readline module is not imported until running the pdb REPL. If - running py.test with the --pdb option this means the readline module is not + running pytest with the --pdb option this means the readline module is not imported until after I/O capture has been started. This is a problem for pyreadline, which is often used to implement readline @@ -470,3 +494,49 @@ def _readline_workaround(): import readline # noqa except ImportError: pass + + +def _py36_windowsconsoleio_workaround(): + """ + Python 3.6 implemented unicode console handling for Windows. This works + by reading/writing to the raw console handle using + ``{Read,Write}ConsoleW``. + + The problem is that we are going to ``dup2`` over the stdio file + descriptors when doing ``FDCapture`` and this will ``CloseHandle`` the + handles used by Python to write to the console. Though there is still some + weirdness and the console handle seems to only be closed randomly and not + on the first call to ``CloseHandle``, or maybe it gets reopened with the + same handle value when we suspend capturing. + + The workaround in this case will reopen stdio with a different fd which + also means a different handle by replicating the logic in + "Py_lifecycle.c:initstdio/create_stdio". 
+ + See https://github.com/pytest-dev/py/issues/103 + """ + if not sys.platform.startswith('win32') or sys.version_info[:2] < (3, 6): + return + + buffered = hasattr(sys.stdout.buffer, 'raw') + raw_stdout = sys.stdout.buffer.raw if buffered else sys.stdout.buffer + + if not isinstance(raw_stdout, io._WindowsConsoleIO): + return + + def _reopen_stdio(f, mode): + if not buffered and mode[0] == 'w': + buffering = 0 + else: + buffering = -1 + + return io.TextIOWrapper( + open(os.dup(f.fileno()), mode, buffering), + f.encoding, + f.errors, + f.newlines, + f.line_buffering) + + sys.__stdin__ = sys.stdin = _reopen_stdio(sys.stdin, 'rb') + sys.__stdout__ = sys.stdout = _reopen_stdio(sys.stdout, 'wb') + sys.__stderr__ = sys.stderr = _reopen_stdio(sys.stderr, 'wb') diff --git a/third_party/python/pytest/_pytest/compat.py b/third_party/python/pytest/_pytest/compat.py new file mode 100644 index 0000000000000..8c200af5fe5a5 --- /dev/null +++ b/third_party/python/pytest/_pytest/compat.py @@ -0,0 +1,307 @@ +""" +python version compatibility code +""" +from __future__ import absolute_import, division, print_function +import sys +import inspect +import types +import re +import functools + +import py + +import _pytest + + + +try: + import enum +except ImportError: # pragma: no cover + # Only available in Python 3.4+ or as a backport + enum = None + + +_PY3 = sys.version_info > (3, 0) +_PY2 = not _PY3 + + +NoneType = type(None) +NOTSET = object() + +PY35 = sys.version_info[:2] >= (3, 5) +PY36 = sys.version_info[:2] >= (3, 6) +MODULE_NOT_FOUND_ERROR = 'ModuleNotFoundError' if PY36 else 'ImportError' + +if hasattr(inspect, 'signature'): + def _format_args(func): + return str(inspect.signature(func)) +else: + def _format_args(func): + return inspect.formatargspec(*inspect.getargspec(func)) + +isfunction = inspect.isfunction +isclass = inspect.isclass +# used to work around a python2 exception info leak +exc_clear = getattr(sys, 'exc_clear', lambda: None) +# The type of re.compile 
objects is not exposed in Python. +REGEX_TYPE = type(re.compile('')) + + +def is_generator(func): + genfunc = inspect.isgeneratorfunction(func) + return genfunc and not iscoroutinefunction(func) + + +def iscoroutinefunction(func): + """Return True if func is a decorated coroutine function. + + Note: copied and modified from Python 3.5's builtin couroutines.py to avoid import asyncio directly, + which in turns also initializes the "logging" module as side-effect (see issue #8). + """ + return (getattr(func, '_is_coroutine', False) or + (hasattr(inspect, 'iscoroutinefunction') and inspect.iscoroutinefunction(func))) + + +def getlocation(function, curdir): + import inspect + fn = py.path.local(inspect.getfile(function)) + lineno = py.builtin._getcode(function).co_firstlineno + if fn.relto(curdir): + fn = fn.relto(curdir) + return "%s:%d" %(fn, lineno+1) + + +def num_mock_patch_args(function): + """ return number of arguments used up by mock arguments (if any) """ + patchings = getattr(function, "patchings", None) + if not patchings: + return 0 + mock = sys.modules.get("mock", sys.modules.get("unittest.mock", None)) + if mock is not None: + return len([p for p in patchings + if not p.attribute_name and p.new is mock.DEFAULT]) + return len(patchings) + + +def getfuncargnames(function, startindex=None): + # XXX merge with main.py's varnames + #assert not isclass(function) + realfunction = function + while hasattr(realfunction, "__wrapped__"): + realfunction = realfunction.__wrapped__ + if startindex is None: + startindex = inspect.ismethod(function) and 1 or 0 + if realfunction != function: + startindex += num_mock_patch_args(function) + function = realfunction + if isinstance(function, functools.partial): + argnames = inspect.getargs(_pytest._code.getrawcode(function.func))[0] + partial = function + argnames = argnames[len(partial.args):] + if partial.keywords: + for kw in partial.keywords: + argnames.remove(kw) + else: + argnames = 
inspect.getargs(_pytest._code.getrawcode(function))[0] + defaults = getattr(function, 'func_defaults', + getattr(function, '__defaults__', None)) or () + numdefaults = len(defaults) + if numdefaults: + return tuple(argnames[startindex:-numdefaults]) + return tuple(argnames[startindex:]) + + + +if sys.version_info[:2] == (2, 6): + def isclass(object): + """ Return true if the object is a class. Overrides inspect.isclass for + python 2.6 because it will return True for objects which always return + something on __getattr__ calls (see #1035). + Backport of https://hg.python.org/cpython/rev/35bf8f7a8edc + """ + return isinstance(object, (type, types.ClassType)) + + +if _PY3: + import codecs + imap = map + STRING_TYPES = bytes, str + UNICODE_TYPES = str, + + def _escape_strings(val): + """If val is pure ascii, returns it as a str(). Otherwise, escapes + bytes objects into a sequence of escaped bytes: + + b'\xc3\xb4\xc5\xd6' -> u'\\xc3\\xb4\\xc5\\xd6' + + and escapes unicode objects into a sequence of escaped unicode + ids, e.g.: + + '4\\nV\\U00043efa\\x0eMXWB\\x1e\\u3028\\u15fd\\xcd\\U0007d944' + + note: + the obvious "v.decode('unicode-escape')" will return + valid utf-8 unicode if it finds them in bytes, but we + want to return escaped bytes for any byte, even if they match + a utf-8 string. + + """ + if isinstance(val, bytes): + if val: + # source: http://goo.gl/bGsnwC + encoded_bytes, _ = codecs.escape_encode(val) + return encoded_bytes.decode('ascii') + else: + # empty bytes crashes codecs.escape_encode (#1087) + return '' + else: + return val.encode('unicode_escape').decode('ascii') +else: + STRING_TYPES = bytes, str, unicode + UNICODE_TYPES = unicode, + + from itertools import imap # NOQA + + def _escape_strings(val): + """In py2 bytes and str are the same type, so return if it's a bytes + object, return it unchanged if it is a full ascii string, + otherwise escape it into its binary form. 
+ + If it's a unicode string, change the unicode characters into + unicode escapes. + + """ + if isinstance(val, bytes): + try: + return val.encode('ascii') + except UnicodeDecodeError: + return val.encode('string-escape') + else: + return val.encode('unicode-escape') + + +def get_real_func(obj): + """ gets the real function object of the (possibly) wrapped object by + functools.wraps or functools.partial. + """ + start_obj = obj + for i in range(100): + new_obj = getattr(obj, '__wrapped__', None) + if new_obj is None: + break + obj = new_obj + else: + raise ValueError( + ("could not find real function of {start}" + "\nstopped at {current}").format( + start=py.io.saferepr(start_obj), + current=py.io.saferepr(obj))) + if isinstance(obj, functools.partial): + obj = obj.func + return obj + + +def getfslineno(obj): + # xxx let decorators etc specify a sane ordering + obj = get_real_func(obj) + if hasattr(obj, 'place_as'): + obj = obj.place_as + fslineno = _pytest._code.getfslineno(obj) + assert isinstance(fslineno[1], int), obj + return fslineno + + +def getimfunc(func): + try: + return func.__func__ + except AttributeError: + try: + return func.im_func + except AttributeError: + return func + + +def safe_getattr(object, name, default): + """ Like getattr but return default upon any Exception. + + Attribute access can potentially fail for 'evil' Python objects. + See issue #214. + """ + try: + return getattr(object, name, default) + except Exception: + return default + + +def _is_unittest_unexpected_success_a_failure(): + """Return if the test suite should fail if a @expectedFailure unittest test PASSES. + + From https://docs.python.org/3/library/unittest.html?highlight=unittest#unittest.TestResult.wasSuccessful: + Changed in version 3.4: Returns False if there were any + unexpectedSuccesses from tests marked with the expectedFailure() decorator. 
+ """ + return sys.version_info >= (3, 4) + + +if _PY3: + def safe_str(v): + """returns v as string""" + return str(v) +else: + def safe_str(v): + """returns v as string, converting to ascii if necessary""" + try: + return str(v) + except UnicodeError: + if not isinstance(v, unicode): + v = unicode(v) + errors = 'replace' + return v.encode('utf-8', errors) + + +COLLECT_FAKEMODULE_ATTRIBUTES = ( + 'Collector', + 'Module', + 'Generator', + 'Function', + 'Instance', + 'Session', + 'Item', + 'Class', + 'File', + '_fillfuncargs', +) + + +def _setup_collect_fakemodule(): + from types import ModuleType + import pytest + pytest.collect = ModuleType('pytest.collect') + pytest.collect.__all__ = [] # used for setns + for attr in COLLECT_FAKEMODULE_ATTRIBUTES: + setattr(pytest.collect, attr, getattr(pytest, attr)) + + +if _PY2: + from py.io import TextIO as CaptureIO +else: + import io + + class CaptureIO(io.TextIOWrapper): + def __init__(self): + super(CaptureIO, self).__init__( + io.BytesIO(), + encoding='UTF-8', newline='', write_through=True, + ) + + def getvalue(self): + return self.buffer.getvalue().decode('UTF-8') + +class FuncargnamesCompatAttr(object): + """ helper class so that Metafunc, Function and FixtureRequest + don't need to each define the "funcargnames" compatibility attribute. + """ + @property + def funcargnames(self): + """ alias attribute for ``fixturenames`` for pre-2.3 compatibility""" + return self.fixturenames diff --git a/third_party/python/pytest/_pytest/config.py b/third_party/python/pytest/_pytest/config.py index 9a308df2bb980..dadd5ca9d6004 100644 --- a/third_party/python/pytest/_pytest/config.py +++ b/third_party/python/pytest/_pytest/config.py @@ -1,4 +1,5 @@ """ command line options, ini-file and conftest.py processing. 
""" +from __future__ import absolute_import, division, print_function import argparse import shlex import traceback @@ -7,10 +8,13 @@ import py # DON't import pytest here because it causes import cycle troubles -import sys, os +import sys +import os import _pytest._code import _pytest.hookspec # the extension point definitions +import _pytest.assertion from _pytest._pluggy import PluginManager, HookimplMarker, HookspecMarker +from _pytest.compat import safe_str hookimpl = HookimplMarker("pytest") hookspec = HookspecMarker("pytest") @@ -25,6 +29,12 @@ def __init__(self, path, excinfo): self.path = path self.excinfo = excinfo + def __str__(self): + etype, evalue, etb = self.excinfo + formatted = traceback.format_tb(etb) + # The level of the tracebacks we want to print is hand crafted :( + return repr(evalue) + '\n' + ''.join(formatted[2:]) + def main(args=None, plugins=None): """ return exit code, after performing an in-process test run. @@ -45,7 +55,6 @@ def main(args=None, plugins=None): return 4 else: try: - config.pluginmanager.check_pending() return config.hook.pytest_cmdline_main(config=config) finally: config._ensure_unconfigure() @@ -57,15 +66,47 @@ def main(args=None, plugins=None): class cmdline: # compatibility namespace main = staticmethod(main) + class UsageError(Exception): """ error in pytest usage or invocation""" + +class PrintHelp(Exception): + """Raised when pytest should print it's help to skip the rest of the + argument parsing and validation.""" + pass + + +def filename_arg(path, optname): + """ Argparse type validator for filename arguments. + + :path: path of filename + :optname: name of the option + """ + if os.path.isdir(path): + raise UsageError("{0} must be a filename, given: {1}".format(optname, path)) + return path + + +def directory_arg(path, optname): + """Argparse type validator for directory arguments. 
+ + :path: path of directory + :optname: name of the option + """ + if not os.path.isdir(path): + raise UsageError("{0} must be a directory, given: {1}".format(optname, path)) + return path + + _preinit = [] default_plugins = ( - "mark main terminal runner python pdb unittest capture skipping " - "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript " - "junitxml resultlog doctest cacheprovider").split() + "mark main terminal runner python fixtures debugging unittest capture skipping " + "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion " + "junitxml resultlog doctest cacheprovider freeze_support " + "setuponly setupplan warnings").split() + builtin_plugins = set(default_plugins) builtin_plugins.add("pytester") @@ -97,6 +138,7 @@ def get_plugin_manager(): return get_config().pluginmanager def _prepareconfig(args=None, plugins=None): + warning = None if args is None: args = sys.argv[1:] elif isinstance(args, py.path.local): @@ -105,6 +147,8 @@ def _prepareconfig(args=None, plugins=None): if not isinstance(args, str): raise ValueError("not a string or argument list: %r" % (args,)) args = shlex.split(args, posix=sys.platform != "win32") + from _pytest import deprecated + warning = deprecated.MAIN_STR_ARGS config = get_config() pluginmanager = config.pluginmanager try: @@ -114,6 +158,8 @@ def _prepareconfig(args=None, plugins=None): pluginmanager.consider_pluginarg(plugin) else: pluginmanager.register(plugin) + if warning: + config.warn('C1', warning) return pluginmanager.hook.pytest_cmdline_parse( pluginmanager=pluginmanager, args=args) except BaseException: @@ -123,7 +169,7 @@ def _prepareconfig(args=None, plugins=None): class PytestPluginManager(PluginManager): """ - Overwrites :py:class:`pluggy.PluginManager` to add pytest-specific + Overwrites :py:class:`pluggy.PluginManager <_pytest.vendored_packages.pluggy.PluginManager>` to add pytest-specific functionality: * loading plugins from the command line, ``PYTEST_PLUGIN`` env variable 
and @@ -139,6 +185,7 @@ def __init__(self): self._conftestpath2mod = {} self._confcutdir = None self._noconftest = False + self._duplicatepaths = set() self.add_hookspecs(_pytest.hookspec) self.register(self) @@ -152,11 +199,14 @@ def __init__(self): self.trace.root.setwriter(err.write) self.enable_tracing() + # Config._consider_importhook will set a real object if required. + self.rewrite_hook = _pytest.assertion.DummyRewriteHook() + def addhooks(self, module_or_class): """ .. deprecated:: 2.8 - Use :py:meth:`pluggy.PluginManager.add_hookspecs` instead. + Use :py:meth:`pluggy.PluginManager.add_hookspecs <_pytest.vendored_packages.pluggy.PluginManager.add_hookspecs>` instead. """ warning = dict(code="I2", fslocation=_pytest._code.getfslineno(sys._getframe(1)), @@ -209,6 +259,9 @@ def register(self, plugin, name=None): if ret: self.hook.pytest_plugin_registered.call_historic( kwargs=dict(plugin=plugin, manager=self)) + + if isinstance(plugin, types.ModuleType): + self.consider_module(plugin) return ret def getplugin(self, name): @@ -353,38 +406,37 @@ def consider_pluginarg(self, arg): self.import_plugin(arg) def consider_conftest(self, conftestmodule): - if self.register(conftestmodule, name=conftestmodule.__file__): - self.consider_module(conftestmodule) + self.register(conftestmodule, name=conftestmodule.__file__) def consider_env(self): self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS")) def consider_module(self, mod): - self._import_plugin_specs(getattr(mod, "pytest_plugins", None)) + self._import_plugin_specs(getattr(mod, 'pytest_plugins', [])) def _import_plugin_specs(self, spec): - if spec: - if isinstance(spec, str): - spec = spec.split(",") - for import_spec in spec: - self.import_plugin(import_spec) + plugins = _get_plugin_specs_as_list(spec) + for import_spec in plugins: + self.import_plugin(import_spec) def import_plugin(self, modname): # most often modname refers to builtin modules, e.g. "pytester", # "terminal" or "capture". 
Those plugins are registered under their # basename for historic purposes but must be imported with the # _pytest prefix. - assert isinstance(modname, str) + assert isinstance(modname, (py.builtin.text, str)), "module name as text required, got %r" % modname + modname = str(modname) if self.get_plugin(modname) is not None: return if modname in builtin_plugins: importspec = "_pytest." + modname else: importspec = modname + self.rewrite_hook.mark_rewrite(importspec) try: __import__(importspec) except ImportError as e: - new_exc = ImportError('Error importing plugin "%s": %s' % (modname, e)) + new_exc = ImportError('Error importing plugin "%s": %s' % (modname, safe_str(e.args[0]))) # copy over name and path attributes for attr in ('name', 'path'): if hasattr(e, attr): @@ -398,7 +450,24 @@ def import_plugin(self, modname): else: mod = sys.modules[importspec] self.register(mod, modname) - self.consider_module(mod) + + +def _get_plugin_specs_as_list(specs): + """ + Parses a list of "plugin specs" and returns a list of plugin names. + + Plugin specs can be given as a list of strings separated by "," or already as a list/tuple in + which case it is returned as a list. Specs can also be `None` in which case an + empty list is returned. + """ + if specs is not None: + if isinstance(specs, str): + specs = specs.split(',') if specs else [] + if not isinstance(specs, (list, tuple)): + raise UsageError("Plugin specs must be a ','-separated string or a " + "list/tuple of strings for plugin names. 
Given: %r" % specs) + return list(specs) + return [] class Parser: @@ -537,13 +606,18 @@ def __str__(self): class Argument: - """class that mimics the necessary behaviour of optparse.Option """ + """class that mimics the necessary behaviour of optparse.Option + + its currently a least effort implementation + and ignoring choices and integer prefixes + https://docs.python.org/3/library/optparse.html#optparse-standard-option-types + """ _typ_map = { 'int': int, 'string': str, - } - # enable after some grace period for plugin writers - TYPE_WARN = False + 'float': float, + 'complex': complex, + } def __init__(self, *names, **attrs): """store parms in private vars for use in add_argument""" @@ -551,17 +625,12 @@ def __init__(self, *names, **attrs): self._short_opts = [] self._long_opts = [] self.dest = attrs.get('dest') - if self.TYPE_WARN: - try: - help = attrs['help'] - if '%default' in help: - warnings.warn( - 'pytest now uses argparse. "%default" should be' - ' changed to "%(default)s" ', - FutureWarning, - stacklevel=3) - except KeyError: - pass + if '%default' in (attrs.get('help') or ''): + warnings.warn( + 'pytest now uses argparse. "%default" should be' + ' changed to "%(default)s" ', + DeprecationWarning, + stacklevel=3) try: typ = attrs['type'] except KeyError: @@ -570,25 +639,23 @@ def __init__(self, *names, **attrs): # this might raise a keyerror as well, don't want to catch that if isinstance(typ, py.builtin._basestring): if typ == 'choice': - if self.TYPE_WARN: - warnings.warn( - 'type argument to addoption() is a string %r.' - ' For parsearg this is optional and when supplied ' - ' should be a type.' - ' (options: %s)' % (typ, names), - FutureWarning, - stacklevel=3) + warnings.warn( + 'type argument to addoption() is a string %r.' + ' For parsearg this is optional and when supplied' + ' should be a type.' 
+ ' (options: %s)' % (typ, names), + DeprecationWarning, + stacklevel=3) # argparse expects a type here take it from # the type of the first element attrs['type'] = type(attrs['choices'][0]) else: - if self.TYPE_WARN: - warnings.warn( - 'type argument to addoption() is a string %r.' - ' For parsearg this should be a type.' - ' (options: %s)' % (typ, names), - FutureWarning, - stacklevel=3) + warnings.warn( + 'type argument to addoption() is a string %r.' + ' For parsearg this should be a type.' + ' (options: %s)' % (typ, names), + DeprecationWarning, + stacklevel=3) attrs['type'] = Argument._typ_map[typ] # used in test_parseopt -> test_parse_defaultgetter self.type = attrs['type'] @@ -655,20 +722,17 @@ def _set_opt_strings(self, opts): self._long_opts.append(opt) def __repr__(self): - retval = 'Argument(' + args = [] if self._short_opts: - retval += '_short_opts: ' + repr(self._short_opts) + ', ' + args += ['_short_opts: ' + repr(self._short_opts)] if self._long_opts: - retval += '_long_opts: ' + repr(self._long_opts) + ', ' - retval += 'dest: ' + repr(self.dest) + ', ' + args += ['_long_opts: ' + repr(self._long_opts)] + args += ['dest: ' + repr(self.dest)] if hasattr(self, 'type'): - retval += 'type: ' + repr(self.type) + ', ' + args += ['type: ' + repr(self.type)] if hasattr(self, 'default'): - retval += 'default: ' + repr(self.default) + ', ' - if retval[-2:] == ', ': # always long enough to test ("Argument(" ) - retval = retval[:-2] - retval += ')' - return retval + args += ['default: ' + repr(self.default)] + return 'Argument({0})'.format(', '.join(args)) class OptionGroup: @@ -686,6 +750,10 @@ def addoption(self, *optnames, **attrs): results in help showing '--two-words' only, but --twowords gets accepted **and** the automatic destination is in args.twowords """ + conflict = set(optnames).intersection( + name for opt in self.options for name in opt.names()) + if conflict: + raise ValueError("option names %s already added" % conflict) option = 
Argument(*optnames, **attrs) self._addoption_instance(option, shortupper=False) @@ -772,7 +840,7 @@ def _format_action_invocation(self, action): if len(option) == 2 or option[2] == ' ': return_list.append(option) if option[2:] == short_long.get(option.replace('-', '')): - return_list.append(option.replace(' ', '=')) + return_list.append(option.replace(' ', '=', 1)) action._formatted_action_invocation = ', '.join(return_list) return action._formatted_action_invocation @@ -797,9 +865,11 @@ class Notset: def __repr__(self): return "" + notset = Notset() FILE_OR_DIR = 'file_or_dir' + class Config(object): """ access to configuration values, pluginmanager and plugin hooks. """ @@ -817,14 +887,17 @@ def __init__(self, pluginmanager): self.trace = self.pluginmanager.trace.root.get("config") self.hook = self.pluginmanager.hook self._inicache = {} + self._override_ini = () self._opt2dest = {} self._cleanup = [] self._warn = self.pluginmanager._warn self.pluginmanager.register(self, "pytestconfig") self._configured = False + def do_setns(dic): import pytest setns(pytest, dic) + self.hook.pytest_namespace.call_historic(do_setns, {}) self.hook.pytest_addoption.call_historic(kwargs=dict(parser=self._parser)) @@ -847,11 +920,11 @@ def _ensure_unconfigure(self): fin = self._cleanup.pop() fin() - def warn(self, code, message, fslocation=None): + def warn(self, code, message, fslocation=None, nodeid=None): """ generate a warning for this test session. 
""" self.hook.pytest_logwarning.call_historic(kwargs=dict( code=code, message=message, - fslocation=fslocation, nodeid=None)) + fslocation=fslocation, nodeid=nodeid)) def get_terminal_writer(self): return self.pluginmanager.get_plugin("terminalreporter")._tw @@ -908,13 +981,81 @@ def pytest_load_initial_conftests(self, early_config): def _initini(self, args): ns, unknown_args = self._parser.parse_known_and_unknown_args(args, namespace=self.option.copy()) - r = determine_setup(ns.inifilename, ns.file_or_dir + unknown_args) + r = determine_setup(ns.inifilename, ns.file_or_dir + unknown_args, warnfunc=self.warn) self.rootdir, self.inifile, self.inicfg = r self._parser.extra_info['rootdir'] = self.rootdir self._parser.extra_info['inifile'] = self.inifile self.invocation_dir = py.path.local() self._parser.addini('addopts', 'extra command line options', 'args') self._parser.addini('minversion', 'minimally required pytest version') + self._override_ini = ns.override_ini or () + + def _consider_importhook(self, args): + """Install the PEP 302 import hook if using assertion re-writing. + + Needs to parse the --assert= option from the commandline + and find all the installed plugins to mark them for re-writing + by the importhook. + """ + ns, unknown_args = self._parser.parse_known_and_unknown_args(args) + mode = ns.assertmode + if mode == 'rewrite': + try: + hook = _pytest.assertion.install_importhook(self) + except SystemError: + mode = 'plain' + else: + self._mark_plugins_for_rewrite(hook) + self._warn_about_missing_assertion(mode) + + def _mark_plugins_for_rewrite(self, hook): + """ + Given an importhook, mark for rewrite any top-level + modules or packages in the distribution package for + all pytest plugins. 
+ """ + import pkg_resources + self.pluginmanager.rewrite_hook = hook + + # 'RECORD' available for plugins installed normally (pip install) + # 'SOURCES.txt' available for plugins installed in dev mode (pip install -e) + # for installed plugins 'SOURCES.txt' returns an empty list, and vice-versa + # so it shouldn't be an issue + metadata_files = 'RECORD', 'SOURCES.txt' + + package_files = ( + entry.split(',')[0] + for entrypoint in pkg_resources.iter_entry_points('pytest11') + for metadata in metadata_files + for entry in entrypoint.dist._get_metadata(metadata) + ) + + for fn in package_files: + is_simple_module = os.sep not in fn and fn.endswith('.py') + is_package = fn.count(os.sep) == 1 and fn.endswith('__init__.py') + if is_simple_module: + module_name, ext = os.path.splitext(fn) + hook.mark_rewrite(module_name) + elif is_package: + package_name = os.path.dirname(fn) + hook.mark_rewrite(package_name) + + def _warn_about_missing_assertion(self, mode): + try: + assert False + except AssertionError: + pass + else: + if mode == 'plain': + sys.stderr.write("WARNING: ASSERTIONS ARE NOT EXECUTED" + " and FAILING TESTS WILL PASS. 
Are you" + " using python -O?") + else: + sys.stderr.write("WARNING: assertions not in test modules or" + " plugins will be ignored" + " because assert statements are not executed " + "by the underlying Python interpreter " + "(are you using python -O?)\n") def _preparse(self, args, addopts=True): self._initini(args) @@ -922,13 +1063,12 @@ def _preparse(self, args, addopts=True): args[:] = shlex.split(os.environ.get('PYTEST_ADDOPTS', '')) + args args[:] = self.getini("addopts") + args self._checkversion() + self._consider_importhook(args) self.pluginmanager.consider_preparse(args) - try: - self.pluginmanager.load_setuptools_entrypoints("pytest11") - except ImportError as e: - self.warn("I2", "could not load setuptools entry import: %s" % (e,)) + self.pluginmanager.load_setuptools_entrypoints('pytest11') self.pluginmanager.consider_env() self.known_args_namespace = ns = self._parser.parse_known_args(args, namespace=self.option.copy()) + confcutdir = self.known_args_namespace.confcutdir if self.known_args_namespace.confcutdir is None and self.inifile: confcutdir = py.path.local(self.inifile).dirname self.known_args_namespace.confcutdir = confcutdir @@ -966,14 +1106,18 @@ def parse(self, args, addopts=True): self._preparse(args, addopts=addopts) # XXX deprecated hook: self.hook.pytest_cmdline_preparse(config=self, args=args) - args = self._parser.parse_setoption(args, self.option, namespace=self.option) - if not args: - cwd = os.getcwd() - if cwd == self.rootdir: - args = self.getini('testpaths') + self._parser.after_preparse = True + try: + args = self._parser.parse_setoption(args, self.option, namespace=self.option) if not args: - args = [cwd] - self.args = args + cwd = os.getcwd() + if cwd == self.rootdir: + args = self.getini('testpaths') + if not args: + args = [cwd] + self.args = args + except PrintHelp: + pass def addinivalue_line(self, name, line): """ add a line to an ini-file option. 
The option must have been @@ -986,7 +1130,7 @@ def addinivalue_line(self, name, line): def getini(self, name): """ return configuration value from an :ref:`ini file `. If the specified name hasn't been registered through a prior - :py:func:`parser.addini ` + :py:func:`parser.addini <_pytest.config.Parser.addini>` call (usually from a plugin), a ValueError is raised. """ try: return self._inicache[name] @@ -999,14 +1143,16 @@ def _getini(self, name): description, type, default = self._parser._inidict[name] except KeyError: raise ValueError("unknown configuration value: %r" %(name,)) - try: - value = self.inicfg[name] - except KeyError: - if default is not None: - return default - if type is None: - return '' - return [] + value = self._get_override_ini_value(name) + if value is None: + try: + value = self.inicfg[name] + except KeyError: + if default is not None: + return default + if type is None: + return '' + return [] if type == "pathlist": dp = py.path.local(self.inicfg.config.path).dirpath() l = [] @@ -1037,6 +1183,22 @@ def _getconftest_pathlist(self, name, path): l.append(relroot) return l + def _get_override_ini_value(self, name): + value = None + # override_ini is a list of list, to support both -o foo1=bar1 foo2=bar2 and + # and -o foo1=bar1 -o foo2=bar2 options + # always use the last item if multiple value set for same ini-name, + # e.g. -o foo=bar1 -o foo=bar2 will set foo to bar2 + for ini_config_list in self._override_ini: + for ini_config in ini_config_list: + try: + (key, user_ini_value) = ini_config.split("=", 1) + except ValueError: + raise UsageError("-o/--override-ini expects option=value style.") + if key == name: + value = user_ini_value + return value + def getoption(self, name, default=notset, skip=False): """ return command line option value. 
@@ -1074,7 +1236,18 @@ def exists(path, ignore=EnvironmentError): except ignore: return False -def getcfg(args, inibasenames): +def getcfg(args, warnfunc=None): + """ + Search the list of arguments for a valid ini-file for pytest, + and return a tuple of (rootdir, inifile, cfg-dict). + + note: warnfunc is an optional function used to warn + about ini-files that use deprecated features. + This parameter should be removed when pytest + adopts standard deprecation warnings (#1804). + """ + from _pytest.deprecated import SETUP_CFG_PYTEST + inibasenames = ["pytest.ini", "tox.ini", "setup.cfg"] args = [x for x in args if not str(x).startswith("-")] if not args: args = [py.path.local()] @@ -1086,57 +1259,89 @@ def getcfg(args, inibasenames): if exists(p): iniconfig = py.iniconfig.IniConfig(p) if 'pytest' in iniconfig.sections: + if inibasename == 'setup.cfg' and warnfunc: + warnfunc('C1', SETUP_CFG_PYTEST) return base, p, iniconfig['pytest'] + if inibasename == 'setup.cfg' and 'tool:pytest' in iniconfig.sections: + return base, p, iniconfig['tool:pytest'] elif inibasename == "pytest.ini": # allowed to be empty return base, p, {} return None, None, None -def get_common_ancestor(args): - # args are what we get after early command line parsing (usually - # strings, but can be py.path.local objects as well) +def get_common_ancestor(paths): common_ancestor = None - for arg in args: - if str(arg)[0] == "-": + for path in paths: + if not path.exists(): continue - p = py.path.local(arg) if common_ancestor is None: - common_ancestor = p + common_ancestor = path else: - if p.relto(common_ancestor) or p == common_ancestor: + if path.relto(common_ancestor) or path == common_ancestor: continue - elif common_ancestor.relto(p): - common_ancestor = p + elif common_ancestor.relto(path): + common_ancestor = path else: - shared = p.common(common_ancestor) + shared = path.common(common_ancestor) if shared is not None: common_ancestor = shared if common_ancestor is None: common_ancestor = 
py.path.local() - elif not common_ancestor.isdir(): + elif common_ancestor.isfile(): common_ancestor = common_ancestor.dirpath() return common_ancestor -def determine_setup(inifile, args): +def get_dirs_from_args(args): + def is_option(x): + return str(x).startswith('-') + + def get_file_part_from_node_id(x): + return str(x).split('::')[0] + + def get_dir_from_path(path): + if path.isdir(): + return path + return py.path.local(path.dirname) + + # These look like paths but may not exist + possible_paths = ( + py.path.local(get_file_part_from_node_id(arg)) + for arg in args + if not is_option(arg) + ) + + return [ + get_dir_from_path(path) + for path in possible_paths + if path.exists() + ] + + +def determine_setup(inifile, args, warnfunc=None): + dirs = get_dirs_from_args(args) if inifile: iniconfig = py.iniconfig.IniConfig(inifile) try: inicfg = iniconfig["pytest"] except KeyError: inicfg = None - rootdir = get_common_ancestor(args) + rootdir = get_common_ancestor(dirs) else: - ancestor = get_common_ancestor(args) - rootdir, inifile, inicfg = getcfg( - [ancestor], ["pytest.ini", "tox.ini", "setup.cfg"]) + ancestor = get_common_ancestor(dirs) + rootdir, inifile, inicfg = getcfg([ancestor], warnfunc=warnfunc) if rootdir is None: for rootdir in ancestor.parts(reverse=True): if rootdir.join("setup.py").exists(): break else: - rootdir = ancestor + rootdir, inifile, inicfg = getcfg(dirs, warnfunc=warnfunc) + if rootdir is None: + rootdir = get_common_ancestor([py.path.local(), ancestor]) + is_fs_root = os.path.splitdrive(str(rootdir))[1] == os.sep + if is_fs_root: + rootdir = ancestor return rootdir, inifile, inicfg or {} diff --git a/third_party/python/pytest/_pytest/pdb.py b/third_party/python/pytest/_pytest/debugging.py similarity index 69% rename from third_party/python/pytest/_pytest/pdb.py rename to third_party/python/pytest/_pytest/debugging.py index 84c920d172cb9..73a0a2ef5856c 100644 --- a/third_party/python/pytest/_pytest/pdb.py +++ 
b/third_party/python/pytest/_pytest/debugging.py @@ -1,51 +1,65 @@ """ interactive debugging with PDB, the Python Debugger. """ -from __future__ import absolute_import +from __future__ import absolute_import, division, print_function import pdb import sys -import pytest def pytest_addoption(parser): group = parser.getgroup("general") - group._addoption('--pdb', - action="store_true", dest="usepdb", default=False, - help="start the interactive Python debugger on errors.") + group._addoption( + '--pdb', dest="usepdb", action="store_true", + help="start the interactive Python debugger on errors.") + group._addoption( + '--pdbcls', dest="usepdb_cls", metavar="modulename:classname", + help="start a custom interactive Python debugger on errors. " + "For example: --pdbcls=IPython.terminal.debugger:TerminalPdb") -def pytest_namespace(): - return {'set_trace': pytestPDB().set_trace} def pytest_configure(config): + if config.getvalue("usepdb_cls"): + modname, classname = config.getvalue("usepdb_cls").split(":") + __import__(modname) + pdb_cls = getattr(sys.modules[modname], classname) + else: + pdb_cls = pdb.Pdb + if config.getvalue("usepdb"): config.pluginmanager.register(PdbInvoke(), 'pdbinvoke') old = (pdb.set_trace, pytestPDB._pluginmanager) + def fin(): pdb.set_trace, pytestPDB._pluginmanager = old pytestPDB._config = None - pdb.set_trace = pytest.set_trace + pytestPDB._pdb_cls = pdb.Pdb + + pdb.set_trace = pytestPDB.set_trace pytestPDB._pluginmanager = config.pluginmanager pytestPDB._config = config + pytestPDB._pdb_cls = pdb_cls config._cleanup.append(fin) class pytestPDB: """ Pseudo PDB that defers to the real pdb. """ _pluginmanager = None _config = None + _pdb_cls = pdb.Pdb - def set_trace(self): + @classmethod + def set_trace(cls): """ invoke PDB set_trace debugging, dropping any IO capturing. 
""" import _pytest.config frame = sys._getframe().f_back - if self._pluginmanager is not None: - capman = self._pluginmanager.getplugin("capturemanager") + if cls._pluginmanager is not None: + capman = cls._pluginmanager.getplugin("capturemanager") if capman: capman.suspendcapture(in_=True) - tw = _pytest.config.create_terminal_writer(self._config) + tw = _pytest.config.create_terminal_writer(cls._config) tw.line() tw.sep(">", "PDB set_trace (IO-capturing turned off)") - self._pluginmanager.hook.pytest_enter_pdb(config=self._config) - pdb.Pdb().set_trace(frame) + cls._pluginmanager.hook.pytest_enter_pdb(config=cls._config) + cls._pdb_cls().set_trace(frame) class PdbInvoke: @@ -59,7 +73,7 @@ def pytest_exception_interact(self, node, call, report): def pytest_internalerror(self, excrepr, excinfo): for line in str(excrepr).split("\n"): - sys.stderr.write("INTERNALERROR> %s\n" %line) + sys.stderr.write("INTERNALERROR> %s\n" % line) sys.stderr.flush() tb = _postmortem_traceback(excinfo) post_mortem(tb) @@ -98,7 +112,7 @@ def _find_last_non_hidden_frame(stack): def post_mortem(t): - class Pdb(pdb.Pdb): + class Pdb(pytestPDB._pdb_cls): def get_stack(self, f, t): stack, i = pdb.Pdb.get_stack(self, f, t) if f is None: diff --git a/third_party/python/pytest/_pytest/deprecated.py b/third_party/python/pytest/_pytest/deprecated.py new file mode 100644 index 0000000000000..e75ff099ee7a2 --- /dev/null +++ b/third_party/python/pytest/_pytest/deprecated.py @@ -0,0 +1,24 @@ +""" +This module contains deprecation messages and bits of code used elsewhere in the codebase +that is planned to be removed in the next pytest release. + +Keeping it in a central location makes it easy to track what is deprecated and should +be removed when the time comes. +""" +from __future__ import absolute_import, division, print_function + +MAIN_STR_ARGS = 'passing a string to pytest.main() is deprecated, ' \ + 'pass a list of arguments instead.' 
+ +YIELD_TESTS = 'yield tests are deprecated, and scheduled to be removed in pytest 4.0' + +FUNCARG_PREFIX = ( + '{name}: declaring fixtures using "pytest_funcarg__" prefix is deprecated ' + 'and scheduled to be removed in pytest 4.0. ' + 'Please remove the prefix and use the @pytest.fixture decorator instead.') + +SETUP_CFG_PYTEST = '[pytest] section in setup.cfg files is deprecated, use [tool:pytest] instead.' + +GETFUNCARGVALUE = "use of getfuncargvalue is deprecated, use getfixturevalue" + +RESULT_LOG = '--result-log is deprecated and scheduled for removal in pytest 4.0' diff --git a/third_party/python/pytest/_pytest/doctest.py b/third_party/python/pytest/_pytest/doctest.py index a57f7a4949e3b..fde6dd71dfba7 100644 --- a/third_party/python/pytest/_pytest/doctest.py +++ b/third_party/python/pytest/_pytest/doctest.py @@ -1,22 +1,41 @@ """ discover and run doctests in modules and test files.""" -from __future__ import absolute_import +from __future__ import absolute_import, division, print_function import traceback import pytest -from _pytest._code.code import TerminalRepr, ReprFileLocation, ExceptionInfo -from _pytest.python import FixtureRequest +from _pytest._code.code import ExceptionInfo, ReprFileLocation, TerminalRepr +from _pytest.fixtures import FixtureRequest +DOCTEST_REPORT_CHOICE_NONE = 'none' +DOCTEST_REPORT_CHOICE_CDIFF = 'cdiff' +DOCTEST_REPORT_CHOICE_NDIFF = 'ndiff' +DOCTEST_REPORT_CHOICE_UDIFF = 'udiff' +DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = 'only_first_failure' + +DOCTEST_REPORT_CHOICES = ( + DOCTEST_REPORT_CHOICE_NONE, + DOCTEST_REPORT_CHOICE_CDIFF, + DOCTEST_REPORT_CHOICE_NDIFF, + DOCTEST_REPORT_CHOICE_UDIFF, + DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE, +) def pytest_addoption(parser): parser.addini('doctest_optionflags', 'option flags for doctests', type="args", default=["ELLIPSIS"]) + parser.addini("doctest_encoding", 'encoding used for doctest files', default="utf-8") group = parser.getgroup("collect") group.addoption("--doctest-modules", 
action="store_true", default=False, help="run doctests in all .py modules", dest="doctestmodules") + group.addoption("--doctest-report", + type=str.lower, default="udiff", + help="choose another output format for diffs on doctest failure", + choices=DOCTEST_REPORT_CHOICES, + dest="doctestreport") group.addoption("--doctest-glob", action="append", default=[], metavar="pat", help="doctests file matching pattern, default: test*.txt", @@ -59,7 +78,6 @@ def toterminal(self, tw): class DoctestItem(pytest.Item): - def __init__(self, name, parent, runner=None, dtest=None): super(DoctestItem, self).__init__(name, parent) self.runner = runner @@ -70,7 +88,9 @@ def __init__(self, name, parent, runner=None, dtest=None): def setup(self): if self.dtest is not None: self.fixture_request = _setup_fixtures(self) - globs = dict(getfixture=self.fixture_request.getfuncargvalue) + globs = dict(getfixture=self.fixture_request.getfixturevalue) + for name, value in self.fixture_request.getfixturevalue('doctest_namespace').items(): + globs[name] = value self.dtest.globs.update(globs) def runtest(self): @@ -92,7 +112,7 @@ def repr_failure(self, excinfo): message = excinfo.type.__name__ reprlocation = ReprFileLocation(filename, lineno, message) checker = _get_checker() - REPORT_UDIFF = doctest.REPORT_UDIFF + report_choice = _get_report_choice(self.config.getoption("doctestreport")) if lineno is not None: lines = doctestfailure.test.docstring.splitlines(False) # add line numbers to the left of the error message @@ -108,7 +128,7 @@ def repr_failure(self, excinfo): indent = '...' 
if excinfo.errisinstance(doctest.DocTestFailure): lines += checker.output_difference(example, - doctestfailure.got, REPORT_UDIFF).split("\n") + doctestfailure.got, report_choice).split("\n") else: inner_excinfo = ExceptionInfo(excinfo.value.exc_info) lines += ["UNEXPECTED EXCEPTION: %s" % @@ -143,30 +163,29 @@ def get_optionflags(parent): flag_acc |= flag_lookup_table[flag] return flag_acc +class DoctestTextfile(pytest.Module): + obj = None -class DoctestTextfile(DoctestItem, pytest.Module): - - def runtest(self): + def collect(self): import doctest - fixture_request = _setup_fixtures(self) # inspired by doctest.testfile; ideally we would use it directly, # but it doesn't support passing a custom checker - text = self.fspath.read() + encoding = self.config.getini("doctest_encoding") + text = self.fspath.read_text(encoding) filename = str(self.fspath) name = self.fspath.basename - globs = dict(getfixture=fixture_request.getfuncargvalue) - if '__name__' not in globs: - globs['__name__'] = '__main__' + globs = {'__name__': '__main__'} optionflags = get_optionflags(self) runner = doctest.DebugRunner(verbose=0, optionflags=optionflags, checker=_get_checker()) + _fix_spoof_python2(runner, encoding) parser = doctest.DocTestParser() test = parser.get_doctest(text, globs, name, filename, 0) - _check_all_skipped(test) - runner.run(test) + if test.examples: + yield DoctestItem(test.name, self, runner, test) def _check_all_skipped(test): @@ -197,6 +216,7 @@ def collect(self): optionflags = get_optionflags(self) runner = doctest.DebugRunner(verbose=0, optionflags=optionflags, checker=_get_checker()) + for test in finder.find(module, module.__name__): if test.examples: # skip empty doctests yield DoctestItem(test.name, self, runner, test) @@ -288,3 +308,53 @@ def _get_allow_bytes_flag(): """ import doctest return doctest.register_optionflag('ALLOW_BYTES') + + +def _get_report_choice(key): + """ + This function returns the actual `doctest` module flag value, we want to do it as 
late as possible to avoid + importing `doctest` and all its dependencies when parsing options, as it adds overhead and breaks tests. + """ + import doctest + + return { + DOCTEST_REPORT_CHOICE_UDIFF: doctest.REPORT_UDIFF, + DOCTEST_REPORT_CHOICE_CDIFF: doctest.REPORT_CDIFF, + DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF, + DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE, + DOCTEST_REPORT_CHOICE_NONE: 0, + }[key] + + +def _fix_spoof_python2(runner, encoding): + """ + Installs a "SpoofOut" into the given DebugRunner so it properly deals with unicode output. This + should patch only doctests for text files because they don't have a way to declare their + encoding. Doctests in docstrings from Python modules don't have the same problem given that + Python already decoded the strings. + + This fixes the problem related in issue #2434. + """ + from _pytest.compat import _PY2 + if not _PY2: + return + + from doctest import _SpoofOut + + class UnicodeSpoof(_SpoofOut): + + def getvalue(self): + result = _SpoofOut.getvalue(self) + if encoding: + result = result.decode(encoding) + return result + + runner._fakeout = UnicodeSpoof() + + +@pytest.fixture(scope='session') +def doctest_namespace(): + """ + Inject names into the doctest namespace. 
+ """ + return dict() diff --git a/third_party/python/pytest/_pytest/fixtures.py b/third_party/python/pytest/_pytest/fixtures.py new file mode 100644 index 0000000000000..64d21b9f69633 --- /dev/null +++ b/third_party/python/pytest/_pytest/fixtures.py @@ -0,0 +1,1129 @@ +from __future__ import absolute_import, division, print_function +import sys + +from py._code.code import FormattedExcinfo + +import py +import warnings + +import inspect +import _pytest +from _pytest._code.code import TerminalRepr +from _pytest.compat import ( + NOTSET, exc_clear, _format_args, + getfslineno, get_real_func, + is_generator, isclass, getimfunc, + getlocation, getfuncargnames, + safe_getattr, +) +from _pytest.runner import fail +from _pytest.compat import FuncargnamesCompatAttr + +def pytest_sessionstart(session): + import _pytest.python + scopename2class.update({ + 'class': _pytest.python.Class, + 'module': _pytest.python.Module, + 'function': _pytest.main.Item, + }) + session._fixturemanager = FixtureManager(session) + + +scopename2class = {} + + +scope2props = dict(session=()) +scope2props["module"] = ("fspath", "module") +scope2props["class"] = scope2props["module"] + ("cls",) +scope2props["instance"] = scope2props["class"] + ("instance", ) +scope2props["function"] = scope2props["instance"] + ("function", "keywords") + +def scopeproperty(name=None, doc=None): + def decoratescope(func): + scopename = name or func.__name__ + + def provide(self): + if func.__name__ in scope2props[self.scope]: + return func(self) + raise AttributeError("%s not available in %s-scoped context" % ( + scopename, self.scope)) + + return property(provide, None, None, func.__doc__) + return decoratescope + + +def get_scope_node(node, scope): + cls = scopename2class.get(scope) + if cls is None: + if scope == "session": + return node.session + raise ValueError("unknown scope") + return node.getparent(cls) + + +def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager): + # this function will 
transform all collected calls to a functions + # if they use direct funcargs (i.e. direct parametrization) + # because we want later test execution to be able to rely on + # an existing FixtureDef structure for all arguments. + # XXX we can probably avoid this algorithm if we modify CallSpec2 + # to directly care for creating the fixturedefs within its methods. + if not metafunc._calls[0].funcargs: + return # this function call does not have direct parametrization + # collect funcargs of all callspecs into a list of values + arg2params = {} + arg2scope = {} + for callspec in metafunc._calls: + for argname, argvalue in callspec.funcargs.items(): + assert argname not in callspec.params + callspec.params[argname] = argvalue + arg2params_list = arg2params.setdefault(argname, []) + callspec.indices[argname] = len(arg2params_list) + arg2params_list.append(argvalue) + if argname not in arg2scope: + scopenum = callspec._arg2scopenum.get(argname, + scopenum_function) + arg2scope[argname] = scopes[scopenum] + callspec.funcargs.clear() + + # register artificial FixtureDef's so that later at test execution + # time we can rely on a proper FixtureDef to exist for fixture setup. + arg2fixturedefs = metafunc._arg2fixturedefs + for argname, valuelist in arg2params.items(): + # if we have a scope that is higher than function we need + # to make sure we only ever create an according fixturedef on + # a per-scope basis. We thus store and cache the fixturedef on the + # node related to the scope. 
+ scope = arg2scope[argname] + node = None + if scope != "function": + node = get_scope_node(collector, scope) + if node is None: + assert scope == "class" and isinstance(collector, _pytest.python.Module) + # use module-level collector for class-scope (for now) + node = collector + if node and argname in node._name2pseudofixturedef: + arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]] + else: + fixturedef = FixtureDef(fixturemanager, '', argname, + get_direct_param_fixture_func, + arg2scope[argname], + valuelist, False, False) + arg2fixturedefs[argname] = [fixturedef] + if node is not None: + node._name2pseudofixturedef[argname] = fixturedef + + + +def getfixturemarker(obj): + """ return fixturemarker or None if it doesn't exist or raised + exceptions.""" + try: + return getattr(obj, "_pytestfixturefunction", None) + except Exception: + # some objects raise errors like request (from flask import request) + # we don't expect them to be fixture functions + return None + + + +def get_parametrized_fixture_keys(item, scopenum): + """ return list of keys for all parametrized arguments which match + the specified scope. 
""" + assert scopenum < scopenum_function # function + try: + cs = item.callspec + except AttributeError: + pass + else: + # cs.indictes.items() is random order of argnames but + # then again different functions (items) can change order of + # arguments so it doesn't matter much probably + for argname, param_index in cs.indices.items(): + if cs._arg2scopenum[argname] != scopenum: + continue + if scopenum == 0: # session + key = (argname, param_index) + elif scopenum == 1: # module + key = (argname, param_index, item.fspath) + elif scopenum == 2: # class + key = (argname, param_index, item.fspath, item.cls) + yield key + + +# algorithm for sorting on a per-parametrized resource setup basis +# it is called for scopenum==0 (session) first and performs sorting +# down to the lower scopes such as to minimize number of "high scope" +# setups and teardowns + +def reorder_items(items): + argkeys_cache = {} + for scopenum in range(0, scopenum_function): + argkeys_cache[scopenum] = d = {} + for item in items: + keys = set(get_parametrized_fixture_keys(item, scopenum)) + if keys: + d[item] = keys + return reorder_items_atscope(items, set(), argkeys_cache, 0) + +def reorder_items_atscope(items, ignore, argkeys_cache, scopenum): + if scopenum >= scopenum_function or len(items) < 3: + return items + items_done = [] + while 1: + items_before, items_same, items_other, newignore = \ + slice_items(items, ignore, argkeys_cache[scopenum]) + items_before = reorder_items_atscope( + items_before, ignore, argkeys_cache,scopenum+1) + if items_same is None: + # nothing to reorder in this scope + assert items_other is None + return items_done + items_before + items_done.extend(items_before) + items = items_same + items_other + ignore = newignore + + +def slice_items(items, ignore, scoped_argkeys_cache): + # we pick the first item which uses a fixture instance in the + # requested scope and which we haven't seen yet. 
We slice the input + # items list into a list of items_nomatch, items_same and + # items_other + if scoped_argkeys_cache: # do we need to do work at all? + it = iter(items) + # first find a slicing key + for i, item in enumerate(it): + argkeys = scoped_argkeys_cache.get(item) + if argkeys is not None: + argkeys = argkeys.difference(ignore) + if argkeys: # found a slicing key + slicing_argkey = argkeys.pop() + items_before = items[:i] + items_same = [item] + items_other = [] + # now slice the remainder of the list + for item in it: + argkeys = scoped_argkeys_cache.get(item) + if argkeys and slicing_argkey in argkeys and \ + slicing_argkey not in ignore: + items_same.append(item) + else: + items_other.append(item) + newignore = ignore.copy() + newignore.add(slicing_argkey) + return (items_before, items_same, items_other, newignore) + return items, None, None, None + + +def fillfixtures(function): + """ fill missing funcargs for a test function. """ + try: + request = function._request + except AttributeError: + # XXX this special code path is only expected to execute + # with the oejskit plugin. It uses classes with funcargs + # and we thus have to work a bit to allow this. + fm = function.session._fixturemanager + fi = fm.getfixtureinfo(function.parent, function.obj, None) + function._fixtureinfo = fi + request = function._request = FixtureRequest(function) + request._fillfixtures() + # prune out funcargs for jstests + newfuncargs = {} + for name in fi.argnames: + newfuncargs[name] = function.funcargs[name] + function.funcargs = newfuncargs + else: + request._fillfixtures() + + + +def get_direct_param_fixture_func(request): + return request.param + +class FuncFixtureInfo: + def __init__(self, argnames, names_closure, name2fixturedefs): + self.argnames = argnames + self.names_closure = names_closure + self.name2fixturedefs = name2fixturedefs + + +class FixtureRequest(FuncargnamesCompatAttr): + """ A request for a fixture from a test or fixture function. 
+ + A request object gives access to the requesting test context + and has an optional ``param`` attribute in case + the fixture is parametrized indirectly. + """ + + def __init__(self, pyfuncitem): + self._pyfuncitem = pyfuncitem + #: fixture for which this request is being performed + self.fixturename = None + #: Scope string, one of "function", "class", "module", "session" + self.scope = "function" + self._fixture_values = {} # argname -> fixture value + self._fixture_defs = {} # argname -> FixtureDef + fixtureinfo = pyfuncitem._fixtureinfo + self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy() + self._arg2index = {} + self._fixturemanager = pyfuncitem.session._fixturemanager + + @property + def fixturenames(self): + # backward incompatible note: now a readonly property + return list(self._pyfuncitem._fixtureinfo.names_closure) + + @property + def node(self): + """ underlying collection node (depends on current request scope)""" + return self._getscopeitem(self.scope) + + + def _getnextfixturedef(self, argname): + fixturedefs = self._arg2fixturedefs.get(argname, None) + if fixturedefs is None: + # we arrive here because of a a dynamic call to + # getfixturevalue(argname) usage which was naturally + # not known at parsing/collection time + parentid = self._pyfuncitem.parent.nodeid + fixturedefs = self._fixturemanager.getfixturedefs(argname, parentid) + self._arg2fixturedefs[argname] = fixturedefs + # fixturedefs list is immutable so we maintain a decreasing index + index = self._arg2index.get(argname, 0) - 1 + if fixturedefs is None or (-index > len(fixturedefs)): + raise FixtureLookupError(argname, self) + self._arg2index[argname] = index + return fixturedefs[index] + + @property + def config(self): + """ the pytest config object associated with this request. """ + return self._pyfuncitem.config + + + @scopeproperty() + def function(self): + """ test function object if the request has a per-function scope. 
""" + return self._pyfuncitem.obj + + @scopeproperty("class") + def cls(self): + """ class (can be None) where the test function was collected. """ + clscol = self._pyfuncitem.getparent(_pytest.python.Class) + if clscol: + return clscol.obj + + @property + def instance(self): + """ instance (can be None) on which test function was collected. """ + # unittest support hack, see _pytest.unittest.TestCaseFunction + try: + return self._pyfuncitem._testcase + except AttributeError: + function = getattr(self, "function", None) + if function is not None: + return py.builtin._getimself(function) + + @scopeproperty() + def module(self): + """ python module object where the test function was collected. """ + return self._pyfuncitem.getparent(_pytest.python.Module).obj + + @scopeproperty() + def fspath(self): + """ the file system path of the test module which collected this test. """ + return self._pyfuncitem.fspath + + @property + def keywords(self): + """ keywords/markers dictionary for the underlying node. """ + return self.node.keywords + + @property + def session(self): + """ pytest session object. """ + return self._pyfuncitem.session + + def addfinalizer(self, finalizer): + """ add finalizer/teardown function to be called after the + last test within the requesting test context finished + execution. """ + # XXX usually this method is shadowed by fixturedef specific ones + self._addfinalizer(finalizer, scope=self.scope) + + def _addfinalizer(self, finalizer, scope): + colitem = self._getscopeitem(scope) + self._pyfuncitem.session._setupstate.addfinalizer( + finalizer=finalizer, colitem=colitem) + + def applymarker(self, marker): + """ Apply a marker to a single test function invocation. + This method is useful if you don't want to have a keyword/marker + on all function invocations. + + :arg marker: a :py:class:`_pytest.mark.MarkDecorator` object + created by a call to ``pytest.mark.NAME(...)``. 
+ """ + try: + self.node.keywords[marker.markname] = marker + except AttributeError: + raise ValueError(marker) + + def raiseerror(self, msg): + """ raise a FixtureLookupError with the given message. """ + raise self._fixturemanager.FixtureLookupError(None, self, msg) + + def _fillfixtures(self): + item = self._pyfuncitem + fixturenames = getattr(item, "fixturenames", self.fixturenames) + for argname in fixturenames: + if argname not in item.funcargs: + item.funcargs[argname] = self.getfixturevalue(argname) + + def cached_setup(self, setup, teardown=None, scope="module", extrakey=None): + """ (deprecated) Return a testing resource managed by ``setup`` & + ``teardown`` calls. ``scope`` and ``extrakey`` determine when the + ``teardown`` function will be called so that subsequent calls to + ``setup`` would recreate the resource. With pytest-2.3 you often + do not need ``cached_setup()`` as you can directly declare a scope + on a fixture function and register a finalizer through + ``request.addfinalizer()``. + + :arg teardown: function receiving a previously setup resource. + :arg setup: a no-argument function creating a resource. + :arg scope: a string value out of ``function``, ``class``, ``module`` + or ``session`` indicating the caching lifecycle of the resource. + :arg extrakey: added to internal caching key of (funcargname, scope). + """ + if not hasattr(self.config, '_setupcache'): + self.config._setupcache = {} # XXX weakref? + cachekey = (self.fixturename, self._getscopeitem(scope), extrakey) + cache = self.config._setupcache + try: + val = cache[cachekey] + except KeyError: + self._check_scope(self.fixturename, self.scope, scope) + val = setup() + cache[cachekey] = val + if teardown is not None: + def finalizer(): + del cache[cachekey] + teardown(val) + self._addfinalizer(finalizer, scope=scope) + return val + + def getfixturevalue(self, argname): + """ Dynamically run a named fixture function. 
+ + Declaring fixtures via function argument is recommended where possible. + But if you can only decide whether to use another fixture at test + setup time, you may use this function to retrieve it inside a fixture + or test function body. + """ + return self._get_active_fixturedef(argname).cached_result[0] + + def getfuncargvalue(self, argname): + """ Deprecated, use getfixturevalue. """ + from _pytest import deprecated + warnings.warn( + deprecated.GETFUNCARGVALUE, + DeprecationWarning) + return self.getfixturevalue(argname) + + def _get_active_fixturedef(self, argname): + try: + return self._fixture_defs[argname] + except KeyError: + try: + fixturedef = self._getnextfixturedef(argname) + except FixtureLookupError: + if argname == "request": + class PseudoFixtureDef: + cached_result = (self, [0], None) + scope = "function" + return PseudoFixtureDef + raise + # remove indent to prevent the python3 exception + # from leaking into the call + result = self._getfixturevalue(fixturedef) + self._fixture_values[argname] = result + self._fixture_defs[argname] = fixturedef + return fixturedef + + def _get_fixturestack(self): + current = self + l = [] + while 1: + fixturedef = getattr(current, "_fixturedef", None) + if fixturedef is None: + l.reverse() + return l + l.append(fixturedef) + current = current._parent_request + + def _getfixturevalue(self, fixturedef): + # prepare a subrequest object before calling fixture function + # (latter managed by fixturedef) + argname = fixturedef.argname + funcitem = self._pyfuncitem + scope = fixturedef.scope + try: + param = funcitem.callspec.getparam(argname) + except (AttributeError, ValueError): + param = NOTSET + param_index = 0 + if fixturedef.params is not None: + frame = inspect.stack()[3] + frameinfo = inspect.getframeinfo(frame[0]) + source_path = frameinfo.filename + source_lineno = frameinfo.lineno + source_path = py.path.local(source_path) + if source_path.relto(funcitem.config.rootdir): + source_path = 
source_path.relto(funcitem.config.rootdir) + msg = ( + "The requested fixture has no parameter defined for the " + "current test.\n\nRequested fixture '{0}' defined in:\n{1}" + "\n\nRequested here:\n{2}:{3}".format( + fixturedef.argname, + getlocation(fixturedef.func, funcitem.config.rootdir), + source_path, + source_lineno, + ) + ) + fail(msg) + else: + # indices might not be set if old-style metafunc.addcall() was used + param_index = funcitem.callspec.indices.get(argname, 0) + # if a parametrize invocation set a scope it will override + # the static scope defined with the fixture function + paramscopenum = funcitem.callspec._arg2scopenum.get(argname) + if paramscopenum is not None: + scope = scopes[paramscopenum] + + subrequest = SubRequest(self, scope, param, param_index, fixturedef) + + # check if a higher-level scoped fixture accesses a lower level one + subrequest._check_scope(argname, self.scope, scope) + + # clear sys.exc_info before invoking the fixture (python bug?) + # if its not explicitly cleared it will leak into the call + exc_clear() + try: + # call the fixture function + val = fixturedef.execute(request=subrequest) + finally: + # if fixture function failed it might have registered finalizers + self.session._setupstate.addfinalizer(fixturedef.finish, + subrequest.node) + return val + + def _check_scope(self, argname, invoking_scope, requested_scope): + if argname == "request": + return + if scopemismatch(invoking_scope, requested_scope): + # try to report something helpful + lines = self._factorytraceback() + fail("ScopeMismatch: You tried to access the %r scoped " + "fixture %r with a %r scoped request object, " + "involved factories\n%s" % ( + (requested_scope, argname, invoking_scope, "\n".join(lines))), + pytrace=False) + + def _factorytraceback(self): + lines = [] + for fixturedef in self._get_fixturestack(): + factory = fixturedef.func + fs, lineno = getfslineno(factory) + p = self._pyfuncitem.session.fspath.bestrelpath(fs) + args = 
_format_args(factory) + lines.append("%s:%d: def %s%s" % ( + p, lineno, factory.__name__, args)) + return lines + + def _getscopeitem(self, scope): + if scope == "function": + # this might also be a non-function Item despite its attribute name + return self._pyfuncitem + node = get_scope_node(self._pyfuncitem, scope) + if node is None and scope == "class": + # fallback to function item itself + node = self._pyfuncitem + assert node + return node + + def __repr__(self): + return "" %(self.node) + + +class SubRequest(FixtureRequest): + """ a sub request for handling getting a fixture from a + test function/fixture. """ + def __init__(self, request, scope, param, param_index, fixturedef): + self._parent_request = request + self.fixturename = fixturedef.argname + if param is not NOTSET: + self.param = param + self.param_index = param_index + self.scope = scope + self._fixturedef = fixturedef + self.addfinalizer = fixturedef.addfinalizer + self._pyfuncitem = request._pyfuncitem + self._fixture_values = request._fixture_values + self._fixture_defs = request._fixture_defs + self._arg2fixturedefs = request._arg2fixturedefs + self._arg2index = request._arg2index + self._fixturemanager = request._fixturemanager + + def __repr__(self): + return "" % (self.fixturename, self._pyfuncitem) + + +class ScopeMismatchError(Exception): + """ A fixture function tries to use a different fixture function which + which has a lower scope (e.g. a Session one calls a function one) + """ + + +scopes = "session module class function".split() +scopenum_function = scopes.index("function") + + +def scopemismatch(currentscope, newscope): + return scopes.index(newscope) > scopes.index(currentscope) + + +def scope2index(scope, descr, where=None): + """Look up the index of ``scope`` and raise a descriptive value error + if not defined. 
+ """ + try: + return scopes.index(scope) + except ValueError: + raise ValueError( + "{0} {1}has an unsupported scope value '{2}'".format( + descr, 'from {0} '.format(where) if where else '', + scope) + ) + + +class FixtureLookupError(LookupError): + """ could not return a requested Fixture (missing or invalid). """ + def __init__(self, argname, request, msg=None): + self.argname = argname + self.request = request + self.fixturestack = request._get_fixturestack() + self.msg = msg + + def formatrepr(self): + tblines = [] + addline = tblines.append + stack = [self.request._pyfuncitem.obj] + stack.extend(map(lambda x: x.func, self.fixturestack)) + msg = self.msg + if msg is not None: + # the last fixture raise an error, let's present + # it at the requesting side + stack = stack[:-1] + for function in stack: + fspath, lineno = getfslineno(function) + try: + lines, _ = inspect.getsourcelines(get_real_func(function)) + except (IOError, IndexError, TypeError): + error_msg = "file %s, line %s: source code not available" + addline(error_msg % (fspath, lineno+1)) + else: + addline("file %s, line %s" % (fspath, lineno+1)) + for i, line in enumerate(lines): + line = line.rstrip() + addline(" " + line) + if line.lstrip().startswith('def'): + break + + if msg is None: + fm = self.request._fixturemanager + available = [] + parentid = self.request._pyfuncitem.parent.nodeid + for name, fixturedefs in fm._arg2fixturedefs.items(): + faclist = list(fm._matchfactories(fixturedefs, parentid)) + if faclist and name not in available: + available.append(name) + msg = "fixture %r not found" % (self.argname,) + msg += "\n available fixtures: %s" %(", ".join(sorted(available)),) + msg += "\n use 'pytest --fixtures [testpath]' for help on them." 
+ + return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname) + + +class FixtureLookupErrorRepr(TerminalRepr): + def __init__(self, filename, firstlineno, tblines, errorstring, argname): + self.tblines = tblines + self.errorstring = errorstring + self.filename = filename + self.firstlineno = firstlineno + self.argname = argname + + def toterminal(self, tw): + # tw.line("FixtureLookupError: %s" %(self.argname), red=True) + for tbline in self.tblines: + tw.line(tbline.rstrip()) + lines = self.errorstring.split("\n") + if lines: + tw.line('{0} {1}'.format(FormattedExcinfo.fail_marker, + lines[0].strip()), red=True) + for line in lines[1:]: + tw.line('{0} {1}'.format(FormattedExcinfo.flow_marker, + line.strip()), red=True) + tw.line() + tw.line("%s:%d" % (self.filename, self.firstlineno+1)) + + +def fail_fixturefunc(fixturefunc, msg): + fs, lineno = getfslineno(fixturefunc) + location = "%s:%s" % (fs, lineno+1) + source = _pytest._code.Source(fixturefunc) + fail(msg + ":\n\n" + str(source.indent()) + "\n" + location, + pytrace=False) + + +def call_fixture_func(fixturefunc, request, kwargs): + yieldctx = is_generator(fixturefunc) + if yieldctx: + it = fixturefunc(**kwargs) + res = next(it) + + def teardown(): + try: + next(it) + except StopIteration: + pass + else: + fail_fixturefunc(fixturefunc, + "yield_fixture function has more than one 'yield'") + + request.addfinalizer(teardown) + else: + res = fixturefunc(**kwargs) + return res + + +class FixtureDef: + """ A container for a factory definition. 
""" + def __init__(self, fixturemanager, baseid, argname, func, scope, params, + unittest=False, ids=None): + self._fixturemanager = fixturemanager + self.baseid = baseid or '' + self.has_location = baseid is not None + self.func = func + self.argname = argname + self.scope = scope + self.scopenum = scope2index( + scope or "function", + descr='fixture {0}'.format(func.__name__), + where=baseid + ) + self.params = params + startindex = unittest and 1 or None + self.argnames = getfuncargnames(func, startindex=startindex) + self.unittest = unittest + self.ids = ids + self._finalizer = [] + + def addfinalizer(self, finalizer): + self._finalizer.append(finalizer) + + def finish(self): + exceptions = [] + try: + while self._finalizer: + try: + func = self._finalizer.pop() + func() + except: + exceptions.append(sys.exc_info()) + if exceptions: + e = exceptions[0] + del exceptions # ensure we don't keep all frames alive because of the traceback + py.builtin._reraise(*e) + + finally: + ihook = self._fixturemanager.session.ihook + ihook.pytest_fixture_post_finalizer(fixturedef=self) + # even if finalization fails, we invalidate + # the cached fixture value + if hasattr(self, "cached_result"): + del self.cached_result + + def execute(self, request): + # get required arguments and register our own finish() + # with their finalization + for argname in self.argnames: + fixturedef = request._get_active_fixturedef(argname) + if argname != "request": + fixturedef.addfinalizer(self.finish) + + my_cache_key = request.param_index + cached_result = getattr(self, "cached_result", None) + if cached_result is not None: + result, cache_key, err = cached_result + if my_cache_key == cache_key: + if err is not None: + py.builtin._reraise(*err) + else: + return result + # we have a previous but differently parametrized fixture instance + # so we need to tear it down before creating a new one + self.finish() + assert not hasattr(self, "cached_result") + + ihook = 
self._fixturemanager.session.ihook + return ihook.pytest_fixture_setup(fixturedef=self, request=request) + + def __repr__(self): + return ("" % + (self.argname, self.scope, self.baseid)) + +def pytest_fixture_setup(fixturedef, request): + """ Execution of fixture setup. """ + kwargs = {} + for argname in fixturedef.argnames: + fixdef = request._get_active_fixturedef(argname) + result, arg_cache_key, exc = fixdef.cached_result + request._check_scope(argname, request.scope, fixdef.scope) + kwargs[argname] = result + + fixturefunc = fixturedef.func + if fixturedef.unittest: + if request.instance is not None: + # bind the unbound method to the TestCase instance + fixturefunc = fixturedef.func.__get__(request.instance) + else: + # the fixture function needs to be bound to the actual + # request.instance so that code working with "fixturedef" behaves + # as expected. + if request.instance is not None: + fixturefunc = getimfunc(fixturedef.func) + if fixturefunc != fixturedef.func: + fixturefunc = fixturefunc.__get__(request.instance) + my_cache_key = request.param_index + try: + result = call_fixture_func(fixturefunc, request, kwargs) + except Exception: + fixturedef.cached_result = (None, my_cache_key, sys.exc_info()) + raise + fixturedef.cached_result = (result, my_cache_key, None) + return result + + +class FixtureFunctionMarker: + def __init__(self, scope, params, autouse=False, ids=None, name=None): + self.scope = scope + self.params = params + self.autouse = autouse + self.ids = ids + self.name = name + + def __call__(self, function): + if isclass(function): + raise ValueError( + "class fixtures not supported (may be in the future)") + function._pytestfixturefunction = self + return function + + + +def fixture(scope="function", params=None, autouse=False, ids=None, name=None): + """ (return a) decorator to mark a fixture factory function. + + This decorator can be used (with or without parameters) to define a + fixture function. 
The name of the fixture function can later be + referenced to cause its invocation ahead of running tests: test + modules or classes can use the pytest.mark.usefixtures(fixturename) + marker. Test functions can directly use fixture names as input + arguments in which case the fixture instance returned from the fixture + function will be injected. + + :arg scope: the scope for which this fixture is shared, one of + "function" (default), "class", "module" or "session". + + :arg params: an optional list of parameters which will cause multiple + invocations of the fixture function and all of the tests + using it. + + :arg autouse: if True, the fixture func is activated for all tests that + can see it. If False (the default) then an explicit + reference is needed to activate the fixture. + + :arg ids: list of string ids each corresponding to the params + so that they are part of the test id. If no ids are provided + they will be generated automatically from the params. + + :arg name: the name of the fixture. This defaults to the name of the + decorated function. If a fixture is used in the same module in + which it is defined, the function name of the fixture will be + shadowed by the function arg that requests the fixture; one way + to resolve this is to name the decorated function + ``fixture_`` and then use + ``@pytest.fixture(name='')``. + + Fixtures can optionally provide their values to test functions using a ``yield`` statement, + instead of ``return``. In this case, the code block after the ``yield`` statement is executed + as teardown code regardless of the test outcome. A fixture function must yield exactly once. 
+ """ + if callable(scope) and params is None and autouse == False: + # direct decoration + return FixtureFunctionMarker( + "function", params, autouse, name=name)(scope) + if params is not None and not isinstance(params, (list, tuple)): + params = list(params) + return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name) + + +def yield_fixture(scope="function", params=None, autouse=False, ids=None, name=None): + """ (return a) decorator to mark a yield-fixture factory function. + + .. deprecated:: 3.0 + Use :py:func:`pytest.fixture` directly instead. + """ + if callable(scope) and params is None and not autouse: + # direct decoration + return FixtureFunctionMarker( + "function", params, autouse, ids=ids, name=name)(scope) + else: + return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name) + + +defaultfuncargprefixmarker = fixture() + + +@fixture(scope="session") +def pytestconfig(request): + """ the pytest config object with access to command line opts.""" + return request.config + + +class FixtureManager: + """ + pytest fixtures definitions and information is stored and managed + from this class. + + During collection fm.parsefactories() is called multiple times to parse + fixture function definitions into FixtureDef objects and internal + data structures. + + During collection of test functions, metafunc-mechanics instantiate + a FuncFixtureInfo object which is cached per node/func-name. + This FuncFixtureInfo object is later retrieved by Function nodes + which themselves offer a fixturenames attribute. + + The FuncFixtureInfo object holds information about fixtures and FixtureDefs + relevant for a particular function. 
An initial list of fixtures is + assembled like this: + + - ini-defined usefixtures + - autouse-marked fixtures along the collection chain up from the function + - usefixtures markers at module/class/function level + - test function funcargs + + Subsequently the funcfixtureinfo.fixturenames attribute is computed + as the closure of the fixtures needed to setup the initial fixtures, + i. e. fixtures needed by fixture functions themselves are appended + to the fixturenames list. + + Upon the test-setup phases all fixturenames are instantiated, retrieved + by a lookup of their FuncFixtureInfo. + """ + + _argprefix = "pytest_funcarg__" + FixtureLookupError = FixtureLookupError + FixtureLookupErrorRepr = FixtureLookupErrorRepr + + def __init__(self, session): + self.session = session + self.config = session.config + self._arg2fixturedefs = {} + self._holderobjseen = set() + self._arg2finish = {} + self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))] + session.config.pluginmanager.register(self, "funcmanage") + + + def getfixtureinfo(self, node, func, cls, funcargs=True): + if funcargs and not hasattr(node, "nofuncargs"): + if cls is not None: + startindex = 1 + else: + startindex = None + argnames = getfuncargnames(func, startindex) + else: + argnames = () + usefixtures = getattr(func, "usefixtures", None) + initialnames = argnames + if usefixtures is not None: + initialnames = usefixtures.args + initialnames + fm = node.session._fixturemanager + names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames, + node) + return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs) + + def pytest_plugin_registered(self, plugin): + nodeid = None + try: + p = py.path.local(plugin.__file__) + except AttributeError: + pass + else: + # construct the base nodeid which is later used to check + # what fixtures are visible for particular tests (as denoted + # by their test id) + if p.basename.startswith("conftest.py"): + nodeid = 
p.dirpath().relto(self.config.rootdir) + if p.sep != "/": + nodeid = nodeid.replace(p.sep, "/") + self.parsefactories(plugin, nodeid) + + def _getautousenames(self, nodeid): + """ return a tuple of fixture names to be used. """ + autousenames = [] + for baseid, basenames in self._nodeid_and_autousenames: + if nodeid.startswith(baseid): + if baseid: + i = len(baseid) + nextchar = nodeid[i:i+1] + if nextchar and nextchar not in ":/": + continue + autousenames.extend(basenames) + # make sure autousenames are sorted by scope, scopenum 0 is session + autousenames.sort( + key=lambda x: self._arg2fixturedefs[x][-1].scopenum) + return autousenames + + def getfixtureclosure(self, fixturenames, parentnode): + # collect the closure of all fixtures , starting with the given + # fixturenames as the initial set. As we have to visit all + # factory definitions anyway, we also return a arg2fixturedefs + # mapping so that the caller can reuse it and does not have + # to re-discover fixturedefs again for each fixturename + # (discovering matching fixtures for a given name/node is expensive) + + parentid = parentnode.nodeid + fixturenames_closure = self._getautousenames(parentid) + + def merge(otherlist): + for arg in otherlist: + if arg not in fixturenames_closure: + fixturenames_closure.append(arg) + + merge(fixturenames) + arg2fixturedefs = {} + lastlen = -1 + while lastlen != len(fixturenames_closure): + lastlen = len(fixturenames_closure) + for argname in fixturenames_closure: + if argname in arg2fixturedefs: + continue + fixturedefs = self.getfixturedefs(argname, parentid) + if fixturedefs: + arg2fixturedefs[argname] = fixturedefs + merge(fixturedefs[-1].argnames) + return fixturenames_closure, arg2fixturedefs + + def pytest_generate_tests(self, metafunc): + for argname in metafunc.fixturenames: + faclist = metafunc._arg2fixturedefs.get(argname) + if faclist: + fixturedef = faclist[-1] + if fixturedef.params is not None: + func_params = getattr(getattr(metafunc.function, 
'parametrize', None), 'args', [[None]]) + # skip directly parametrized arguments + argnames = func_params[0] + if not isinstance(argnames, (tuple, list)): + argnames = [x.strip() for x in argnames.split(",") if x.strip()] + if argname not in func_params and argname not in argnames: + metafunc.parametrize(argname, fixturedef.params, + indirect=True, scope=fixturedef.scope, + ids=fixturedef.ids) + else: + continue # will raise FixtureLookupError at setup time + + def pytest_collection_modifyitems(self, items): + # separate parametrized setups + items[:] = reorder_items(items) + + def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False): + if nodeid is not NOTSET: + holderobj = node_or_obj + else: + holderobj = node_or_obj.obj + nodeid = node_or_obj.nodeid + if holderobj in self._holderobjseen: + return + self._holderobjseen.add(holderobj) + autousenames = [] + for name in dir(holderobj): + # The attribute can be an arbitrary descriptor, so the attribute + # access below can raise. safe_getatt() ignores such exceptions. 
+ obj = safe_getattr(holderobj, name, None) + # fixture functions have a pytest_funcarg__ prefix (pre-2.3 style) + # or are "@pytest.fixture" marked + marker = getfixturemarker(obj) + if marker is None: + if not name.startswith(self._argprefix): + continue + if not callable(obj): + continue + marker = defaultfuncargprefixmarker + from _pytest import deprecated + self.config.warn('C1', deprecated.FUNCARG_PREFIX.format(name=name), nodeid=nodeid) + name = name[len(self._argprefix):] + elif not isinstance(marker, FixtureFunctionMarker): + # magic globals with __getattr__ might have got us a wrong + # fixture attribute + continue + else: + if marker.name: + name = marker.name + msg = 'fixtures cannot have "pytest_funcarg__" prefix ' \ + 'and be decorated with @pytest.fixture:\n%s' % name + assert not name.startswith(self._argprefix), msg + + fixture_def = FixtureDef(self, nodeid, name, obj, + marker.scope, marker.params, + unittest=unittest, ids=marker.ids) + + faclist = self._arg2fixturedefs.setdefault(name, []) + if fixture_def.has_location: + faclist.append(fixture_def) + else: + # fixturedefs with no location are at the front + # so this inserts the current fixturedef after the + # existing fixturedefs from external plugins but + # before the fixturedefs provided in conftests. + i = len([f for f in faclist if not f.has_location]) + faclist.insert(i, fixture_def) + if marker.autouse: + autousenames.append(name) + + if autousenames: + self._nodeid_and_autousenames.append((nodeid or '', autousenames)) + + def getfixturedefs(self, argname, nodeid): + """ + Gets a list of fixtures which are applicable to the given node id. + + :param str argname: name of the fixture to search for + :param str nodeid: full node id of the requesting test. 
+ :return: list[FixtureDef] + """ + try: + fixturedefs = self._arg2fixturedefs[argname] + except KeyError: + return None + else: + return tuple(self._matchfactories(fixturedefs, nodeid)) + + def _matchfactories(self, fixturedefs, nodeid): + for fixturedef in fixturedefs: + if nodeid.startswith(fixturedef.baseid): + yield fixturedef + diff --git a/third_party/python/pytest/_pytest/freeze_support.py b/third_party/python/pytest/_pytest/freeze_support.py new file mode 100644 index 0000000000000..52f86087fff64 --- /dev/null +++ b/third_party/python/pytest/_pytest/freeze_support.py @@ -0,0 +1,44 @@ +""" +Provides a function to report all internal modules for using freezing tools +pytest +""" +from __future__ import absolute_import, division, print_function + + + +def freeze_includes(): + """ + Returns a list of module names used by py.test that should be + included by cx_freeze. + """ + import py + import _pytest + result = list(_iter_all_modules(py)) + result += list(_iter_all_modules(_pytest)) + return result + + +def _iter_all_modules(package, prefix=''): + """ + Iterates over the names of all modules that can be found in the given + package, recursively. + Example: + _iter_all_modules(_pytest) -> + ['_pytest.assertion.newinterpret', + '_pytest.capture', + '_pytest.core', + ... + ] + """ + import os + import pkgutil + if type(package) is not str: + path, prefix = package.__path__[0], package.__name__ + '.' 
+ else: + path = package + for _, name, is_package in pkgutil.iter_modules([path]): + if is_package: + for m in _iter_all_modules(os.path.join(path, name), prefix=name + '.'): + yield prefix + m + else: + yield prefix + name diff --git a/third_party/python/pytest/_pytest/genscript.py b/third_party/python/pytest/_pytest/genscript.py deleted file mode 100755 index d2962d8fc82f0..0000000000000 --- a/third_party/python/pytest/_pytest/genscript.py +++ /dev/null @@ -1,132 +0,0 @@ -""" (deprecated) generate a single-file self-contained version of pytest """ -import os -import sys -import pkgutil - -import py -import _pytest - - - -def find_toplevel(name): - for syspath in sys.path: - base = py.path.local(syspath) - lib = base/name - if lib.check(dir=1): - return lib - mod = base.join("%s.py" % name) - if mod.check(file=1): - return mod - raise LookupError(name) - -def pkgname(toplevel, rootpath, path): - parts = path.parts()[len(rootpath.parts()):] - return '.'.join([toplevel] + [x.purebasename for x in parts]) - -def pkg_to_mapping(name): - toplevel = find_toplevel(name) - name2src = {} - if toplevel.check(file=1): # module - name2src[toplevel.purebasename] = toplevel.read() - else: # package - for pyfile in toplevel.visit('*.py'): - pkg = pkgname(name, toplevel, pyfile) - name2src[pkg] = pyfile.read() - # with wheels py source code might be not be installed - # and the resulting genscript is useless, just bail out. 
- assert name2src, "no source code found for %r at %r" %(name, toplevel) - return name2src - -def compress_mapping(mapping): - import base64, pickle, zlib - data = pickle.dumps(mapping, 2) - data = zlib.compress(data, 9) - data = base64.encodestring(data) - data = data.decode('ascii') - return data - - -def compress_packages(names): - mapping = {} - for name in names: - mapping.update(pkg_to_mapping(name)) - return compress_mapping(mapping) - -def generate_script(entry, packages): - data = compress_packages(packages) - tmpl = py.path.local(__file__).dirpath().join('standalonetemplate.py') - exe = tmpl.read() - exe = exe.replace('@SOURCES@', data) - exe = exe.replace('@ENTRY@', entry) - return exe - - -def pytest_addoption(parser): - group = parser.getgroup("debugconfig") - group.addoption("--genscript", action="store", default=None, - dest="genscript", metavar="path", - help="create standalone pytest script at given target path.") - -def pytest_cmdline_main(config): - import _pytest.config - genscript = config.getvalue("genscript") - if genscript: - tw = _pytest.config.create_terminal_writer(config) - tw.line("WARNING: usage of genscript is deprecated.", - red=True) - deps = ['py', '_pytest', 'pytest'] # pluggy is vendored - if sys.version_info < (2,7): - deps.append("argparse") - tw.line("generated script will run on python2.6-python3.3++") - else: - tw.line("WARNING: generated script will not run on python2.6 " - "due to 'argparse' dependency. Use python2.6 " - "to generate a python2.6 compatible script", red=True) - script = generate_script( - 'import pytest; raise SystemExit(pytest.cmdline.main())', - deps, - ) - genscript = py.path.local(genscript) - genscript.write(script) - tw.line("generated pytest standalone script: %s" % genscript, - bold=True) - return 0 - - -def pytest_namespace(): - return {'freeze_includes': freeze_includes} - - -def freeze_includes(): - """ - Returns a list of module names used by py.test that should be - included by cx_freeze. 
- """ - result = list(_iter_all_modules(py)) - result += list(_iter_all_modules(_pytest)) - return result - - -def _iter_all_modules(package, prefix=''): - """ - Iterates over the names of all modules that can be found in the given - package, recursively. - - Example: - _iter_all_modules(_pytest) -> - ['_pytest.assertion.newinterpret', - '_pytest.capture', - '_pytest.core', - ... - ] - """ - if type(package) is not str: - path, prefix = package.__path__[0], package.__name__ + '.' - else: - path = package - for _, name, is_package in pkgutil.iter_modules([path]): - if is_package: - for m in _iter_all_modules(os.path.join(path, name), prefix=name + '.'): - yield prefix + m - else: - yield prefix + name diff --git a/third_party/python/pytest/_pytest/helpconfig.py b/third_party/python/pytest/_pytest/helpconfig.py index 1df0c56ac7b86..e3c6b6e99777e 100644 --- a/third_party/python/pytest/_pytest/helpconfig.py +++ b/third_party/python/pytest/_pytest/helpconfig.py @@ -1,13 +1,48 @@ """ version info, help messages, tracing configuration. """ +from __future__ import absolute_import, division, print_function + import py import pytest +from _pytest.config import PrintHelp import os, sys +from argparse import Action + + +class HelpAction(Action): + """This is an argparse Action that will raise an exception in + order to skip the rest of the argument parsing when --help is passed. + This prevents argparse from quitting due to missing required arguments + when any are defined, for example by ``pytest_addoption``. + This is similar to the way that the builtin argparse --help option is + implemented by raising SystemExit. 
+ """ + + def __init__(self, + option_strings, + dest=None, + default=False, + help=None): + super(HelpAction, self).__init__( + option_strings=option_strings, + dest=dest, + const=True, + default=default, + nargs=0, + help=help) + + def __call__(self, parser, namespace, values, option_string=None): + setattr(namespace, self.dest, self.const) + + # We should only skip the rest of the parsing after preparse is done + if getattr(parser._parser, 'after_preparse', False): + raise PrintHelp + def pytest_addoption(parser): group = parser.getgroup('debugconfig') group.addoption('--version', action="store_true", help="display pytest lib version and import information.") - group._addoption("-h", "--help", action="store_true", dest="help", + group._addoption("-h", "--help", action=HelpAction, dest="help", help="show help message and configuration info") group._addoption('-p', action="append", dest="plugins", default = [], metavar="name", @@ -20,6 +55,10 @@ def pytest_addoption(parser): group.addoption('--debug', action="store_true", dest="debug", default=False, help="store internal tracing debug information in 'pytestdebug.log'.") + group._addoption( + '-o', '--override-ini', nargs='*', dest="override_ini", + action="append", + help="override config option with option=value style, e.g. 
`-o xfail_strict=True`.") @pytest.hookimpl(hookwrapper=True) @@ -37,12 +76,14 @@ def pytest_cmdline_parse(): config.trace.root.setwriter(debugfile.write) undo_tracing = config.pluginmanager.enable_tracing() sys.stderr.write("writing pytestdebug information to %s\n" % path) + def unset_tracing(): debugfile.close() sys.stderr.write("wrote pytestdebug information to %s\n" % debugfile.name) config.trace.root.setwriter(None) undo_tracing() + config.add_cleanup(unset_tracing) def pytest_cmdline_main(config): @@ -67,9 +108,8 @@ def showhelp(config): tw.write(config._parser.optparser.format_help()) tw.line() tw.line() - #tw.sep( "=", "config file settings") - tw.line("[pytest] ini-options in the next " - "pytest.ini|tox.ini|setup.cfg file:") + tw.line("[pytest] ini-options in the first " + "pytest.ini|tox.ini|setup.cfg file found:") tw.line() for name in config._parser._ininames: @@ -92,8 +132,8 @@ def showhelp(config): tw.line() tw.line() - tw.line("to see available markers type: py.test --markers") - tw.line("to see available fixtures type: py.test --fixtures") + tw.line("to see available markers type: pytest --markers") + tw.line("to see available fixtures type: pytest --fixtures") tw.line("(shown according to specified file_or_dir or current dir " "if not specified)") diff --git a/third_party/python/pytest/_pytest/hookspec.py b/third_party/python/pytest/_pytest/hookspec.py index 60e9b47d2621c..2c9a661635932 100644 --- a/third_party/python/pytest/_pytest/hookspec.py +++ b/third_party/python/pytest/_pytest/hookspec.py @@ -16,7 +16,9 @@ def pytest_addhooks(pluginmanager): @hookspec(historic=True) def pytest_namespace(): - """return dict of name->object to be made globally available in + """ + DEPRECATED: this hook causes direct monkeypatching on pytest, its use is strongly discouraged + return dict of name->object to be made globally available in the pytest namespace. This hook is called at plugin registration time. """ @@ -34,7 +36,7 @@ def pytest_addoption(parser): .. 
note:: This function should be implemented only in plugins or ``conftest.py`` - files situated at the tests root directory due to how py.test + files situated at the tests root directory due to how pytest :ref:`discovers plugins during startup `. :arg parser: To add command line options, call @@ -71,7 +73,9 @@ def pytest_configure(config): @hookspec(firstresult=True) def pytest_cmdline_parse(pluginmanager, args): - """return initialized config object, parsing the specified args. """ + """return initialized config object, parsing the specified args. + + Stops at first non-None result, see :ref:`firstresult` """ def pytest_cmdline_preparse(config, args): """(deprecated) modify command line arguments before option parsing. """ @@ -79,7 +83,9 @@ def pytest_cmdline_preparse(config, args): @hookspec(firstresult=True) def pytest_cmdline_main(config): """ called for performing the main command line action. The default - implementation will invoke the configure hooks and runtest_mainloop. """ + implementation will invoke the configure hooks and runtest_mainloop. + + Stops at first non-None result, see :ref:`firstresult` """ def pytest_load_initial_conftests(early_config, parser, args): """ implements the loading of initial conftest files ahead @@ -92,7 +98,9 @@ def pytest_load_initial_conftests(early_config, parser, args): @hookspec(firstresult=True) def pytest_collection(session): - """ perform the collection protocol for the given session. """ + """ perform the collection protocol for the given session. + + Stops at first non-None result, see :ref:`firstresult` """ def pytest_collection_modifyitems(session, config, items): """ called after collection has been performed, may filter or re-order @@ -106,11 +114,15 @@ def pytest_ignore_collect(path, config): """ return True to prevent considering this path for collection. This hook is consulted for all files and directories prior to calling more specific hooks. 
+ + Stops at first non-None result, see :ref:`firstresult` """ @hookspec(firstresult=True) def pytest_collect_directory(path, parent): - """ called before traversing a directory for collection files. """ + """ called before traversing a directory for collection files. + + Stops at first non-None result, see :ref:`firstresult` """ def pytest_collect_file(path, parent): """ return collection Node or None for the given path. Any new node @@ -131,7 +143,9 @@ def pytest_deselected(items): @hookspec(firstresult=True) def pytest_make_collect_report(collector): - """ perform ``collector.collect()`` and return a CollectReport. """ + """ perform ``collector.collect()`` and return a CollectReport. + + Stops at first non-None result, see :ref:`firstresult` """ # ------------------------------------------------------------------------- # Python test function related hooks @@ -143,19 +157,32 @@ def pytest_pycollect_makemodule(path, parent): This hook will be called for each matching test module path. The pytest_collect_file hook needs to be used if you want to create test modules for files that do not match as a test module. - """ + + Stops at first non-None result, see :ref:`firstresult` """ @hookspec(firstresult=True) def pytest_pycollect_makeitem(collector, name, obj): - """ return custom item/collector for a python object in a module, or None. """ + """ return custom item/collector for a python object in a module, or None. + + Stops at first non-None result, see :ref:`firstresult` """ @hookspec(firstresult=True) def pytest_pyfunc_call(pyfuncitem): - """ call underlying test function. """ + """ call underlying test function. 
+ + Stops at first non-None result, see :ref:`firstresult` """ def pytest_generate_tests(metafunc): """ generate (multiple) parametrized calls to a test function.""" +@hookspec(firstresult=True) +def pytest_make_parametrize_id(config, val, argname): + """Return a user-friendly string representation of the given ``val`` that will be used + by @pytest.mark.parametrize calls. Return None if the hook doesn't know about ``val``. + The parameter name is available as ``argname``, if required. + + Stops at first non-None result, see :ref:`firstresult` """ + # ------------------------------------------------------------------------- # generic runtest related hooks # ------------------------------------------------------------------------- @@ -163,7 +190,9 @@ def pytest_generate_tests(metafunc): @hookspec(firstresult=True) def pytest_runtestloop(session): """ called for performing the main runtest loop - (after collection finished). """ + (after collection finished). + + Stops at first non-None result, see :ref:`firstresult` """ def pytest_itemstart(item, node): """ (deprecated, use pytest_runtest_logstart). """ @@ -181,7 +210,9 @@ def pytest_runtest_protocol(item, nextitem): :py:func:`pytest_runtest_teardown`. :return boolean: True if no further hook implementations should be invoked. - """ + + + Stops at first non-None result, see :ref:`firstresult` """ def pytest_runtest_logstart(nodeid, location): """ signal the start of running a single test item. """ @@ -204,14 +235,30 @@ def pytest_runtest_teardown(item, nextitem): @hookspec(firstresult=True) def pytest_runtest_makereport(item, call): """ return a :py:class:`_pytest.runner.TestReport` object - for the given :py:class:`pytest.Item` and + for the given :py:class:`pytest.Item <_pytest.main.Item>` and :py:class:`_pytest.runner.CallInfo`. 
- """ + + Stops at first non-None result, see :ref:`firstresult` """ def pytest_runtest_logreport(report): """ process a test setup/call/teardown report relating to the respective phase of executing a test. """ +# ------------------------------------------------------------------------- +# Fixture related hooks +# ------------------------------------------------------------------------- + +@hookspec(firstresult=True) +def pytest_fixture_setup(fixturedef, request): + """ performs fixture setup execution. + + Stops at first non-None result, see :ref:`firstresult` """ + +def pytest_fixture_post_finalizer(fixturedef): + """ called after fixture teardown, but before the cache is cleared so + the fixture result cache ``fixturedef.cached_result`` can + still be accessed.""" + # ------------------------------------------------------------------------- # test session related hooks # ------------------------------------------------------------------------- @@ -227,7 +274,7 @@ def pytest_unconfigure(config): # ------------------------------------------------------------------------- -# hooks for customising the assert methods +# hooks for customizing the assert methods # ------------------------------------------------------------------------- def pytest_assertrepr_compare(config, op, left, right): @@ -236,7 +283,7 @@ def pytest_assertrepr_compare(config, op, left, right): Return None for no custom explanation, otherwise return a list of strings. The strings will be joined by newlines but any newlines *in* a string will be escaped. Note that all but the first line will - be indented sligthly, the intention is for the first line to be a summary. + be indented slightly, the intention is for the first line to be a summary. 
""" # ------------------------------------------------------------------------- @@ -244,13 +291,22 @@ def pytest_assertrepr_compare(config, op, left, right): # ------------------------------------------------------------------------- def pytest_report_header(config, startdir): - """ return a string to be displayed as header info for terminal reporting.""" + """ return a string to be displayed as header info for terminal reporting. + + .. note:: + + This function should be implemented only in plugins or ``conftest.py`` + files situated at the tests root directory due to how pytest + :ref:`discovers plugins during startup `. + """ @hookspec(firstresult=True) def pytest_report_teststatus(report): - """ return result-category, shortletter and verbose word for reporting.""" + """ return result-category, shortletter and verbose word for reporting. -def pytest_terminal_summary(terminalreporter): + Stops at first non-None result, see :ref:`firstresult` """ + +def pytest_terminal_summary(terminalreporter, exitstatus): """ add additional section in terminal summary reporting. """ @@ -266,7 +322,9 @@ def pytest_logwarning(message, code, nodeid, fslocation): @hookspec(firstresult=True) def pytest_doctest_prepare_content(content): - """ return processed content for a given doctest""" + """ return processed content for a given doctest + + Stops at first non-None result, see :ref:`firstresult` """ # ------------------------------------------------------------------------- # error handling and internal debugging hooks diff --git a/third_party/python/pytest/_pytest/junitxml.py b/third_party/python/pytest/_pytest/junitxml.py index f4de1343ed1bd..301633706a903 100644 --- a/third_party/python/pytest/_pytest/junitxml.py +++ b/third_party/python/pytest/_pytest/junitxml.py @@ -4,16 +4,20 @@ Based on initial code from Ross Lawley. 
+ +Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/ +src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd """ -# Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/ -# src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd +from __future__ import absolute_import, division, print_function +import functools import py import os import re import sys import time import pytest +from _pytest.config import filename_arg # Python 2.X and 3.X compatibility if sys.version_info[0] < 3: @@ -27,6 +31,7 @@ class Junit(py.xml.Namespace): pass + # We need to get the subset of the invalid unicode ranges according to # XML 1.0 which are valid in this python build. Hence we calculate # this dynamically instead of hardcoding it. The spec range of valid @@ -102,6 +107,8 @@ def record_testreport(self, testreport): } if testreport.location[1] is not None: attrs["line"] = testreport.location[1] + if hasattr(testreport, "url"): + attrs["url"] = testreport.url self.attrs = attrs def to_xml(self): @@ -116,19 +123,15 @@ def _add_simple(self, kind, message, data=None): node = kind(data, message=message) self.append(node) - def _write_captured_output(self, report): + def write_captured_output(self, report): for capname in ('out', 'err'): - allcontent = "" - for name, content in report.get_sections("Captured std%s" % - capname): - allcontent += content - if allcontent: + content = getattr(report, 'capstd' + capname) + if content: tag = getattr(Junit, 'system-' + capname) - self.append(tag(bin_xml_escape(allcontent))) + self.append(tag(bin_xml_escape(content))) def append_pass(self, report): self.add_stats('passed') - self._write_captured_output(report) def append_failure(self, report): # msg = str(report.longrepr.reprtraceback.extraline) @@ -147,7 +150,6 @@ def append_failure(self, report): fail = Junit.failure(message=message) fail.append(bin_xml_escape(report.longrepr)) self.append(fail) - 
self._write_captured_output(report) def append_collect_error(self, report): # msg = str(report.longrepr.reprtraceback.extraline) @@ -159,9 +161,12 @@ def append_collect_skipped(self, report): Junit.skipped, "collection skipped", report.longrepr) def append_error(self, report): + if getattr(report, 'when', None) == 'teardown': + msg = "test teardown failure" + else: + msg = "test setup failure" self._add_simple( - Junit.error, "test setup failure", report.longrepr) - self._write_captured_output(report) + Junit.error, msg, report.longrepr) def append_skipped(self, report): if hasattr(report, "wasxfail"): @@ -176,7 +181,7 @@ def append_skipped(self, report): Junit.skipped("%s:%s: %s" % (filename, lineno, skipreason), type="pytest.skip", message=skipreason)) - self._write_captured_output(report) + self.write_captured_output(report) def finalize(self): data = self.to_xml().unicode(indent=0) @@ -186,8 +191,8 @@ def finalize(self): @pytest.fixture def record_xml_property(request): - """Fixture that adds extra xml properties to the tag for the calling test. - The fixture is callable with (name, value), with value being automatically + """Add extra xml properties to the tag for the calling test. + The fixture is callable with ``(name, value)``, with value being automatically xml-encoded. 
""" request.node.warn( @@ -212,6 +217,7 @@ def pytest_addoption(parser): action="store", dest="xmlpath", metavar="path", + type=functools.partial(filename_arg, optname="--junitxml"), default=None, help="create junit-xml style report file at given path.") group.addoption( @@ -220,13 +226,14 @@ def pytest_addoption(parser): metavar="str", default=None, help="prepend prefix to classnames in junit-xml output") + parser.addini("junit_suite_name", "Test suite name for JUnit report", default="pytest") def pytest_configure(config): xmlpath = config.option.xmlpath # prevent opening xmllog on slave nodes (xdist) if xmlpath and not hasattr(config, 'slaveinput'): - config._xml = LogXML(xmlpath, config.option.junitprefix) + config._xml = LogXML(xmlpath, config.option.junitprefix, config.getini("junit_suite_name")) config.pluginmanager.register(config._xml) @@ -253,10 +260,11 @@ def mangle_test_address(address): class LogXML(object): - def __init__(self, logfile, prefix): + def __init__(self, logfile, prefix, suite_name="pytest"): logfile = os.path.expanduser(os.path.expandvars(logfile)) self.logfile = os.path.normpath(os.path.abspath(logfile)) self.prefix = prefix + self.suite_name = suite_name self.stats = dict.fromkeys([ 'error', 'passed', @@ -265,6 +273,10 @@ def __init__(self, logfile, prefix): ], 0) self.node_reporters = {} # nodeid -> _NodeReporter self.node_reporters_ordered = [] + self.global_properties = [] + # List of reports that failed on call but teardown is pending. 
+ self.open_reports = [] + self.cnt_double_fail_tests = 0 def finalize(self, report): nodeid = getattr(report, 'nodeid', report) @@ -284,9 +296,12 @@ def node_reporter(self, report): if key in self.node_reporters: # TODO: breasks for --dist=each return self.node_reporters[key] + reporter = _NodeReporter(nodeid, self) + self.node_reporters[key] = reporter self.node_reporters_ordered.append(reporter) + return reporter def add_stats(self, key): @@ -321,14 +336,33 @@ def pytest_runtest_logreport(self, report): -> teardown node2 -> teardown node1 """ + close_report = None if report.passed: if report.when == "call": # ignore setup/teardown reporter = self._opentestcase(report) reporter.append_pass(report) elif report.failed: + if report.when == "teardown": + # The following vars are needed when xdist plugin is used + report_wid = getattr(report, "worker_id", None) + report_ii = getattr(report, "item_index", None) + close_report = next( + (rep for rep in self.open_reports + if (rep.nodeid == report.nodeid and + getattr(rep, "item_index", None) == report_ii and + getattr(rep, "worker_id", None) == report_wid + ) + ), None) + if close_report: + # We need to open new testcase in case we have failure in + # call and error in teardown in order to follow junit + # schema + self.finalize(close_report) + self.cnt_double_fail_tests += 1 reporter = self._opentestcase(report) if report.when == "call": reporter.append_failure(report) + self.open_reports.append(report) else: reporter.append_error(report) elif report.skipped: @@ -336,7 +370,20 @@ def pytest_runtest_logreport(self, report): reporter.append_skipped(report) self.update_testcase_duration(report) if report.when == "teardown": + reporter = self._opentestcase(report) + reporter.write_captured_output(report) self.finalize(report) + report_wid = getattr(report, "worker_id", None) + report_ii = getattr(report, "item_index", None) + close_report = next( + (rep for rep in self.open_reports + if (rep.nodeid == report.nodeid and + 
getattr(rep, "item_index", None) == report_ii and + getattr(rep, "worker_id", None) == report_wid + ) + ), None) + if close_report: + self.open_reports.remove(close_report) def update_testcase_duration(self, report): """accumulates total duration for nodeid from given report and updates @@ -369,12 +416,15 @@ def pytest_sessionfinish(self): suite_stop_time = time.time() suite_time_delta = suite_stop_time - self.suite_start_time - numtests = self.stats['passed'] + self.stats['failure'] + self.stats['skipped'] - + numtests = (self.stats['passed'] + self.stats['failure'] + + self.stats['skipped'] + self.stats['error'] - + self.cnt_double_fail_tests) logfile.write('') + logfile.write(Junit.testsuite( + self._get_global_properties_node(), [x.to_xml() for x in self.node_reporters_ordered], - name="pytest", + name=self.suite_name, errors=self.stats['error'], failures=self.stats['failure'], skips=self.stats['skipped'], @@ -385,3 +435,18 @@ def pytest_sessionfinish(self): def pytest_terminal_summary(self, terminalreporter): terminalreporter.write_sep("-", "generated xml file: %s" % (self.logfile)) + + def add_global_property(self, name, value): + self.global_properties.append((str(name), bin_xml_escape(value))) + + def _get_global_properties_node(self): + """Return a Junit node containing custom properties, if any. + """ + if self.global_properties: + return Junit.properties( + [ + Junit.property(name=name, value=value) + for name, value in self.global_properties + ] + ) + return '' diff --git a/third_party/python/pytest/_pytest/main.py b/third_party/python/pytest/_pytest/main.py index 8654d7af627d9..e6f679a376970 100644 --- a/third_party/python/pytest/_pytest/main.py +++ b/third_party/python/pytest/_pytest/main.py @@ -1,19 +1,20 @@ """ core implementation of testing process: init, session, runtest loop. 
""" -import imp +from __future__ import absolute_import, division, print_function + +import functools import os -import re import sys import _pytest import _pytest._code import py -import pytest try: from collections import MutableMapping as MappingMixin except ImportError: from UserDict import DictMixin as MappingMixin -from _pytest.runner import collect_one_node +from _pytest.config import directory_arg, UsageError, hookimpl +from _pytest.runner import collect_one_node, exit tracebackcutdir = py.path.local(_pytest.__file__).dirpath() @@ -25,11 +26,10 @@ EXIT_USAGEERROR = 4 EXIT_NOTESTSCOLLECTED = 5 -name_re = re.compile("^[a-zA-Z_]\w*$") def pytest_addoption(parser): parser.addini("norecursedirs", "directory patterns to avoid for recursion", - type="args", default=['.*', 'CVS', '_darcs', '{arch}', '*.egg']) + type="args", default=['.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg', 'venv']) parser.addini("testpaths", "directories to search for tests when no files or directories are given in the command line.", type="args", default=[]) #parser.addini("dirpatterns", @@ -38,8 +38,8 @@ def pytest_addoption(parser): # "**/test_*.py", "**/*_test.py"] #) group = parser.getgroup("general", "running and selection options") - group._addoption('-x', '--exitfirst', action="store_true", default=False, - dest="exitfirst", + group._addoption('-x', '--exitfirst', action="store_const", + dest="maxfail", const=1, help="exit instantly on first error or failed test."), group._addoption('--maxfail', metavar="num", action="store", type=int, dest="maxfail", default=0, @@ -48,6 +48,9 @@ def pytest_addoption(parser): help="run pytest in strict mode, warnings become errors.") group._addoption("-c", metavar="file", type=str, dest="inifilename", help="load configuration from `file` instead of trying to locate one of the implicit configuration files.") + group._addoption("--continue-on-collection-errors", action="store_true", + default=False, dest="continue_on_collection_errors", + 
help="Force test execution even if collection errors occur.") group = parser.getgroup("collect", "collection") group.addoption('--collectonly', '--collect-only', action="store_true", @@ -59,11 +62,14 @@ def pytest_addoption(parser): # when changing this to --conf-cut-dir, config.py Conftest.setinitial # needs upgrading as well group.addoption('--confcutdir', dest="confcutdir", default=None, - metavar="dir", + metavar="dir", type=functools.partial(directory_arg, optname="--confcutdir"), help="only load conftest.py's relative to specified dir.") group.addoption('--noconftest', action="store_true", dest="noconftest", default=False, help="Don't load any conftest.py files.") + group.addoption('--keepduplicates', '--keep-duplicates', action="store_true", + dest="keepduplicates", default=False, + help="Keep duplicate tests.") group = parser.getgroup("debugconfig", "test session debugging and configuration") @@ -71,14 +77,19 @@ def pytest_addoption(parser): help="base temporary directory for this test run.") + def pytest_namespace(): - collect = dict(Item=Item, Collector=Collector, File=File, Session=Session) - return dict(collect=collect) + """keeping this one works around a deeper startup issue in pytest + + i tried to find it for a while but the amount of time turned unsustainable, + so i put a hack in to revisit later + """ + return {} + def pytest_configure(config): - pytest.config = config # compatibiltiy - if config.option.exitfirst: - config.option.maxfail = 1 + __import__('pytest').config = config # compatibiltiy + def wrap_session(config, doit): """Skeleton command line program""" @@ -92,10 +103,13 @@ def wrap_session(config, doit): config.hook.pytest_sessionstart(session=session) initstate = 2 session.exitstatus = doit(config, session) or 0 - except pytest.UsageError: + except UsageError: raise except KeyboardInterrupt: excinfo = _pytest._code.ExceptionInfo() + if initstate < 2 and isinstance(excinfo.value, exit.Exception): + sys.stderr.write('{0}: 
{1}\n'.format( + excinfo.typename, excinfo.value.msg)) config.hook.pytest_keyboard_interrupt(excinfo=excinfo) session.exitstatus = EXIT_INTERRUPTED except: @@ -115,9 +129,11 @@ def wrap_session(config, doit): config._ensure_unconfigure() return session.exitstatus + def pytest_cmdline_main(config): return wrap_session(config, _main) + def _main(config, session): """ default command line protocol for initialization, session, running tests and reporting. """ @@ -129,37 +145,49 @@ def _main(config, session): elif session.testscollected == 0: return EXIT_NOTESTSCOLLECTED + def pytest_collection(session): return session.perform_collect() + def pytest_runtestloop(session): + if (session.testsfailed and + not session.config.option.continue_on_collection_errors): + raise session.Interrupted( + "%d errors during collection" % session.testsfailed) + if session.config.option.collectonly: return True - def getnextitem(i): - # this is a function to avoid python2 - # keeping sys.exc_info set when calling into a test - # python2 keeps sys.exc_info till the frame is left - try: - return session.items[i+1] - except IndexError: - return None - for i, item in enumerate(session.items): - nextitem = getnextitem(i) + nextitem = session.items[i+1] if i+1 < len(session.items) else None item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) if session.shouldstop: raise session.Interrupted(session.shouldstop) return True + def pytest_ignore_collect(path, config): - p = path.dirpath() - ignore_paths = config._getconftest_pathlist("collect_ignore", path=p) + ignore_paths = config._getconftest_pathlist("collect_ignore", path=path.dirpath()) ignore_paths = ignore_paths or [] excludeopt = config.getoption("ignore") if excludeopt: ignore_paths.extend([py.path.local(x) for x in excludeopt]) - return path in ignore_paths + + if py.path.local(path) in ignore_paths: + return True + + # Skip duplicate paths. 
+ keepduplicates = config.getoption("keepduplicates") + duplicate_paths = config.pluginmanager._duplicatepaths + if not keepduplicates: + if path in duplicate_paths: + return True + else: + duplicate_paths.add(path) + + return False + class FSHookProxy: def __init__(self, fspath, pm, remove_mods): @@ -172,12 +200,22 @@ def __getattr__(self, name): self.__dict__[name] = x return x -def compatproperty(name): - def fget(self): - # deprecated - use pytest.name - return getattr(pytest, name) +class _CompatProperty(object): + def __init__(self, name): + self.name = name + + def __get__(self, obj, owner): + if obj is None: + return self + + # TODO: reenable in the features branch + # warnings.warn( + # "usage of {owner!r}.{name} is deprecated, please use pytest.{name} instead".format( + # name=self.name, owner=type(owner).__name__), + # PendingDeprecationWarning, stacklevel=2) + return getattr(__import__('pytest'), self.name) + - return property(fget) class NodeKeywords(MappingMixin): def __init__(self, node): @@ -249,19 +287,23 @@ def ihook(self): """ fspath sensitive hook proxy used to call pytest hooks""" return self.session.gethookproxy(self.fspath) - Module = compatproperty("Module") - Class = compatproperty("Class") - Instance = compatproperty("Instance") - Function = compatproperty("Function") - File = compatproperty("File") - Item = compatproperty("Item") + Module = _CompatProperty("Module") + Class = _CompatProperty("Class") + Instance = _CompatProperty("Instance") + Function = _CompatProperty("Function") + File = _CompatProperty("File") + Item = _CompatProperty("Item") def _getcustomclass(self, name): - cls = getattr(self, name) - if cls != getattr(pytest, name): - py.log._apiwarn("2.0", "use of node.%s is deprecated, " - "use pytest_pycollect_makeitem(...) 
to create custom " - "collection nodes" % name) + maybe_compatprop = getattr(type(self), name) + if isinstance(maybe_compatprop, _CompatProperty): + return getattr(__import__('pytest'), name) + else: + cls = getattr(self, name) + # TODO: reenable in the features branch + # warnings.warn("use of node.%s is deprecated, " + # "use pytest_pycollect_makeitem(...) to create custom " + # "collection nodes" % name, category=DeprecationWarning) return cls def __repr__(self): @@ -275,9 +317,6 @@ def warn(self, code, message): fslocation = getattr(self, "location", None) if fslocation is None: fslocation = getattr(self, "fspath", None) - else: - fslocation = "%s:%s" % fslocation[:2] - self.ihook.pytest_logwarning.call_historic(kwargs=dict( code=code, message=message, nodeid=self.nodeid, fslocation=fslocation)) @@ -338,9 +377,9 @@ def add_marker(self, marker): ``marker`` can be a string or pytest.mark.* instance. """ - from _pytest.mark import MarkDecorator + from _pytest.mark import MarkDecorator, MARK_GEN if isinstance(marker, py.builtin._basestring): - marker = MarkDecorator(marker) + marker = getattr(MARK_GEN, marker) elif not isinstance(marker, MarkDecorator): raise ValueError("is not a string or pytest.mark.* Marker") self.keywords[marker.name] = marker @@ -392,7 +431,10 @@ def _repr_failure_py(self, excinfo, style=None): if self.config.option.fulltrace: style="long" else: + tb = _pytest._code.Traceback([excinfo.traceback[-1]]) self._prunetraceback(excinfo) + if len(excinfo.traceback) == 0: + excinfo.traceback = tb tbfilter = False # prunetraceback already does it if style == "auto": style = "long" @@ -403,7 +445,13 @@ def _repr_failure_py(self, excinfo, style=None): else: style = "long" - return excinfo.getrepr(funcargs=True, + try: + os.getcwd() + abspath = False + except OSError: + abspath = True + + return excinfo.getrepr(funcargs=True, abspath=abspath, showlocals=self.config.option.showlocals, style=style, tbfilter=tbfilter) @@ -430,10 +478,6 @@ def 
repr_failure(self, excinfo): return str(exc.args[0]) return self._repr_failure_py(excinfo, style="short") - def _memocollect(self): - """ internal helper method to cache results of calling collect(). """ - return self._memoizedcall('_collected', lambda: list(self.collect())) - def _prunetraceback(self, excinfo): if hasattr(self, 'fspath'): traceback = excinfo.traceback @@ -510,7 +554,6 @@ class Session(FSCollector): def __init__(self, config): FSCollector.__init__(self, config.rootdir, parent=None, config=config, session=self) - self._fs2hookproxy = {} self.testsfailed = 0 self.testscollected = 0 self.shouldstop = False @@ -522,12 +565,12 @@ def __init__(self, config): def _makeid(self): return "" - @pytest.hookimpl(tryfirst=True) + @hookimpl(tryfirst=True) def pytest_collectstart(self): if self.shouldstop: raise self.Interrupted(self.shouldstop) - @pytest.hookimpl(tryfirst=True) + @hookimpl(tryfirst=True) def pytest_runtest_logreport(self, report): if report.failed and not hasattr(report, 'wasxfail'): self.testsfailed += 1 @@ -541,28 +584,24 @@ def isinitpath(self, path): return path in self._initialpaths def gethookproxy(self, fspath): - try: - return self._fs2hookproxy[fspath] - except KeyError: - # check if we have the common case of running - # hooks with all conftest.py filesall conftest.py - pm = self.config.pluginmanager - my_conftestmodules = pm._getconftestmodules(fspath) - remove_mods = pm._conftest_plugins.difference(my_conftestmodules) - if remove_mods: - # one or more conftests are not in use at this fspath - proxy = FSHookProxy(fspath, pm, remove_mods) - else: - # all plugis are active for this fspath - proxy = self.config.hook - - self._fs2hookproxy[fspath] = proxy - return proxy + # check if we have the common case of running + # hooks with all conftest.py filesall conftest.py + pm = self.config.pluginmanager + my_conftestmodules = pm._getconftestmodules(fspath) + remove_mods = pm._conftest_plugins.difference(my_conftestmodules) + if remove_mods: + 
# one or more conftests are not in use at this fspath + proxy = FSHookProxy(fspath, pm, remove_mods) + else: + # all plugis are active for this fspath + proxy = self.config.hook + return proxy def perform_collect(self, args=None, genitems=True): hook = self.config.hook try: items = self._perform_collect(args, genitems) + self.config.pluginmanager.check_pending() hook.pytest_collection_modifyitems(session=self, config=self.config, items=items) finally: @@ -591,8 +630,8 @@ def _perform_collect(self, args, genitems): for arg, exc in self._notfound: line = "(no name %r in any of %r)" % (arg, exc.args[0]) errors.append("not found: %s\n%s" % (arg, line)) - #XXX: test this - raise pytest.UsageError(*errors) + # XXX: test this + raise UsageError(*errors) if not genitems: return rep.result else: @@ -620,7 +659,7 @@ def _collect(self, arg): names = self._parsearg(arg) path = names.pop(0) if path.check(dir=1): - assert not names, "invalid arg %r" %(arg,) + assert not names, "invalid arg %r" % (arg,) for path in path.visit(fil=lambda x: x.check(file=1), rec=self._recurse, bf=True, sort=True): for x in self._collectfile(path): @@ -649,44 +688,41 @@ def _recurse(self, path): return True def _tryconvertpyarg(self, x): - mod = None - path = [os.path.abspath('.')] + sys.path - for name in x.split('.'): - # ignore anything that's not a proper name here - # else something like --pyargs will mess up '.' - # since imp.find_module will actually sometimes work for it - # but it's supposed to be considered a filesystem path - # not a package - if name_re.match(name) is None: - return x - try: - fd, mod, type_ = imp.find_module(name, path) - except ImportError: - return x - else: - if fd is not None: - fd.close() + """Convert a dotted module name to path. 
- if type_[2] != imp.PKG_DIRECTORY: - path = [os.path.dirname(mod)] - else: - path = [mod] - return mod + """ + import pkgutil + try: + loader = pkgutil.find_loader(x) + except ImportError: + return x + if loader is None: + return x + # This method is sometimes invoked when AssertionRewritingHook, which + # does not define a get_filename method, is already in place: + try: + path = loader.get_filename(x) + except AttributeError: + # Retrieve path from AssertionRewritingHook: + path = loader.modules[x][0].co_filename + if loader.is_package(x): + path = os.path.dirname(path) + return path def _parsearg(self, arg): """ return (fspath, names) tuple after checking the file exists. """ - arg = str(arg) - if self.config.option.pyargs: - arg = self._tryconvertpyarg(arg) parts = str(arg).split("::") + if self.config.option.pyargs: + parts[0] = self._tryconvertpyarg(parts[0]) relpath = parts[0].replace("/", os.sep) path = self.config.invocation_dir.join(relpath, abs=True) if not path.check(): if self.config.option.pyargs: - msg = "file or package not found: " + raise UsageError( + "file or package not found: " + arg + + " (missing __init__.py?)") else: - msg = "file not found: " - raise pytest.UsageError(msg + arg) + raise UsageError("file not found: " + arg) parts[0] = path return parts @@ -709,11 +745,11 @@ def _matchnodes(self, matching, names): nextnames = names[1:] resultnodes = [] for node in matching: - if isinstance(node, pytest.Item): + if isinstance(node, Item): if not names: resultnodes.append(node) continue - assert isinstance(node, pytest.Collector) + assert isinstance(node, Collector) rep = collect_one_node(node) if rep.passed: has_matched = False @@ -726,16 +762,20 @@ def _matchnodes(self, matching, names): if not has_matched and len(rep.result) == 1 and x.name == "()": nextnames.insert(0, name) resultnodes.extend(self.matchnodes([x], nextnames)) - node.ihook.pytest_collectreport(report=rep) + else: + # report collection failures here to avoid failing to run 
some test + # specified in the command line because the module could not be + # imported (#134) + node.ihook.pytest_collectreport(report=rep) return resultnodes def genitems(self, node): self.trace("genitems", node) - if isinstance(node, pytest.Item): + if isinstance(node, Item): node.ihook.pytest_itemcollected(item=node) yield node else: - assert isinstance(node, pytest.Collector) + assert isinstance(node, Collector) rep = collect_one_node(node) if rep.passed: for subnode in rep.result: diff --git a/third_party/python/pytest/_pytest/mark.py b/third_party/python/pytest/_pytest/mark.py index d8b60def366f4..8b40a4f6e4fbc 100644 --- a/third_party/python/pytest/_pytest/mark.py +++ b/third_party/python/pytest/_pytest/mark.py @@ -1,5 +1,64 @@ """ generic mechanism for marking and selecting python functions. """ +from __future__ import absolute_import, division, print_function + import inspect +from collections import namedtuple +from operator import attrgetter +from .compat import imap + + +def alias(name): + return property(attrgetter(name), doc='alias for ' + name) + + +class ParameterSet(namedtuple('ParameterSet', 'values, marks, id')): + @classmethod + def param(cls, *values, **kw): + marks = kw.pop('marks', ()) + if isinstance(marks, MarkDecorator): + marks = marks, + else: + assert isinstance(marks, (tuple, list, set)) + + def param_extract_id(id=None): + return id + + id = param_extract_id(**kw) + return cls(values, marks, id) + + @classmethod + def extract_from(cls, parameterset, legacy_force_tuple=False): + """ + :param parameterset: + a legacy style parameterset that may or may not be a tuple, + and may or may not be wrapped into a mess of mark objects + + :param legacy_force_tuple: + enforce tuple wrapping so single argument tuple values + don't get decomposed and break tests + + """ + + if isinstance(parameterset, cls): + return parameterset + if not isinstance(parameterset, MarkDecorator) and legacy_force_tuple: + return cls.param(parameterset) + + newmarks 
= [] + argval = parameterset + while isinstance(argval, MarkDecorator): + newmarks.append(MarkDecorator(Mark( + argval.markname, argval.args[:-1], argval.kwargs))) + argval = argval.args[-1] + assert not isinstance(argval, ParameterSet) + if legacy_force_tuple: + argval = argval, + + return cls(argval, marks=newmarks, id=None) + + @property + def deprecated_arg_dict(self): + return dict((mark.name, mark) for mark in self.marks) class MarkerError(Exception): @@ -7,8 +66,8 @@ class MarkerError(Exception): """Error in use of a pytest marker/attribute.""" -def pytest_namespace(): - return {'mark': MarkGenerator()} +def param(*values, **kw): + return ParameterSet.param(*values, **kw) def pytest_addoption(parser): @@ -19,7 +78,7 @@ def pytest_addoption(parser): help="only run tests which match the given substring expression. " "An expression is a python evaluatable expression " "where all names are substring-matched against test names " - "and their parent classes. Example: -k 'test_method or test " + "and their parent classes. Example: -k 'test_method or test_" "other' matches all test functions and classes whose name " "contains 'test_method' or 'test_other'. 
" "Additionally keywords are matched to classes and functions " @@ -54,6 +113,8 @@ def pytest_cmdline_main(config): tw.line() config._ensure_unconfigure() return 0 + + pytest_cmdline_main.tryfirst = True @@ -64,7 +125,7 @@ def pytest_collection_modifyitems(items, config): return # pytest used to allow "-" for negating # but today we just allow "-" at the beginning, use "not" instead - # we probably remove "-" alltogether soon + # we probably remove "-" altogether soon if keywordexpr.startswith("-"): keywordexpr = "not " + keywordexpr[1:] selectuntil = False @@ -160,9 +221,13 @@ def matchkeyword(colitem, keywordexpr): def pytest_configure(config): - import pytest + config._old_mark_config = MARK_GEN._config if config.option.strict: - pytest.mark._config = config + MARK_GEN._config = config + + +def pytest_unconfigure(config): + MARK_GEN._config = getattr(config, '_old_mark_config', None) class MarkGenerator: @@ -176,13 +241,15 @@ def test_function(): will set a 'slowtest' :class:`MarkInfo` object on the ``test_function`` object. """ + _config = None + def __getattr__(self, name): if name[0] == "_": raise AttributeError("Marker name must NOT start with underscore") - if hasattr(self, '_config'): + if self._config is not None: self._check(name) - return MarkDecorator(name) + return MarkDecorator(Mark(name, (), {})) def _check(self, name): try: @@ -198,6 +265,7 @@ def _check(self, name): if name not in self._markers: raise AttributeError("%r not a registered marker" % (name,)) + def istestfunc(func): return hasattr(func, "__call__") and \ getattr(func, "__name__", "") != "" @@ -235,19 +303,23 @@ def test_function(): additional keyword or positional arguments. 
""" - def __init__(self, name, args=None, kwargs=None): - self.name = name - self.args = args or () - self.kwargs = kwargs or {} + def __init__(self, mark): + assert isinstance(mark, Mark), repr(mark) + self.mark = mark + + name = alias('mark.name') + args = alias('mark.args') + kwargs = alias('mark.kwargs') @property def markname(self): return self.name # for backward-compat (2.4.1 had this attr) + def __eq__(self, other): + return self.mark == other.mark + def __repr__(self): - d = self.__dict__.copy() - name = d.pop('name') - return "" % (name, d) + return "" % (self.mark,) def __call__(self, *args, **kwargs): """ if passed a single callable argument: decorate it with mark info. @@ -270,42 +342,50 @@ def __call__(self, *args, **kwargs): else: holder = getattr(func, self.name, None) if holder is None: - holder = MarkInfo( - self.name, self.args, self.kwargs - ) + holder = MarkInfo(self.mark) setattr(func, self.name, holder) else: - holder.add(self.args, self.kwargs) + holder.add_mark(self.mark) return func - kw = self.kwargs.copy() - kw.update(kwargs) - args = self.args + args - return self.__class__(self.name, args=args, kwargs=kw) + + mark = Mark(self.name, args, kwargs) + return self.__class__(self.mark.combined_with(mark)) + -class MarkInfo: + + +class Mark(namedtuple('Mark', 'name, args, kwargs')): + + def combined_with(self, other): + assert self.name == other.name + return Mark( + self.name, self.args + other.args, + dict(self.kwargs, **other.kwargs)) + + +class MarkInfo(object): """ Marking object created by :class:`MarkDecorator` instances. 
""" - def __init__(self, name, args, kwargs): - #: name of attribute - self.name = name - #: positional argument list, empty if none specified - self.args = args - #: keyword argument dictionary, empty if nothing specified - self.kwargs = kwargs.copy() - self._arglist = [(args, kwargs.copy())] + def __init__(self, mark): + assert isinstance(mark, Mark), repr(mark) + self.combined = mark + self._marks = [mark] + + name = alias('combined.name') + args = alias('combined.args') + kwargs = alias('combined.kwargs') def __repr__(self): - return "" % ( - self.name, self.args, self.kwargs - ) + return "".format(self.combined) - def add(self, args, kwargs): + def add_mark(self, mark): """ add a MarkInfo with the given args and kwargs. """ - self._arglist.append((args, kwargs)) - self.args += args - self.kwargs.update(kwargs) + self._marks.append(mark) + self.combined = self.combined.combined_with(mark) def __iter__(self): """ yield MarkInfo objects each relating to a marking-call. """ - for args, kwargs in self._arglist: - yield MarkInfo(self.name, args, kwargs) + return imap(MarkInfo, self._marks) + + +MARK_GEN = MarkGenerator() diff --git a/third_party/python/pytest/_pytest/monkeypatch.py b/third_party/python/pytest/_pytest/monkeypatch.py index d4c169d37a9ae..a70b23dda9165 100644 --- a/third_party/python/pytest/_pytest/monkeypatch.py +++ b/third_party/python/pytest/_pytest/monkeypatch.py @@ -1,15 +1,19 @@ """ monkeypatching and mocking functionality. 
""" +from __future__ import absolute_import, division, print_function -import os, sys +import os +import sys import re from py.builtin import _basestring +from _pytest.fixtures import fixture RE_IMPORT_ERROR_NAME = re.compile("^No module named (.*)$") -def pytest_funcarg__monkeypatch(request): - """The returned ``monkeypatch`` funcarg provides these +@fixture +def monkeypatch(): + """The returned ``monkeypatch`` fixture provides these helper methods to modify objects, dictionaries or os.environ:: monkeypatch.setattr(obj, name, value, raising=True) @@ -22,13 +26,13 @@ def pytest_funcarg__monkeypatch(request): monkeypatch.chdir(path) All modifications will be undone after the requesting - test function has finished. The ``raising`` + test function or fixture has finished. The ``raising`` parameter determines if a KeyError or AttributeError will be raised if the set/deletion operation has no target. """ - mpatch = monkeypatch() - request.addfinalizer(mpatch.undo) - return mpatch + mpatch = MonkeyPatch() + yield mpatch + mpatch.undo() def resolve(name): @@ -93,8 +97,9 @@ def __repr__(self): notset = Notset() -class monkeypatch: - """ Object keeping a record of setattr/item/env/syspath changes. """ +class MonkeyPatch: + """ Object returned by the ``monkeypatch`` fixture keeping a record of setattr/item/env/syspath changes. + """ def __init__(self): self._setattr = [] @@ -220,10 +225,10 @@ def undo(self): """ Undo previous changes. This call consumes the undo stack. Calling it a second time has no effect unless you do more monkeypatching after the undo call. - + There is generally no need to call `undo()`, since it is called automatically during tear-down. - + Note that the same `monkeypatch` fixture is used across a single test function invocation. 
If `monkeypatch` is used both by the test function itself and one of the test fixtures, diff --git a/third_party/python/pytest/_pytest/nose.py b/third_party/python/pytest/_pytest/nose.py index 0387468686076..9d4fc0b6e1558 100644 --- a/third_party/python/pytest/_pytest/nose.py +++ b/third_party/python/pytest/_pytest/nose.py @@ -1,10 +1,11 @@ """ run test suites written for nose. """ +from __future__ import absolute_import, division, print_function import sys import py -import pytest -from _pytest import unittest +from _pytest import unittest, runner, python +from _pytest.config import hookimpl def get_skip_exceptions(): @@ -19,19 +20,19 @@ def get_skip_exceptions(): def pytest_runtest_makereport(item, call): if call.excinfo and call.excinfo.errisinstance(get_skip_exceptions()): # let's substitute the excinfo with a pytest.skip one - call2 = call.__class__(lambda: - pytest.skip(str(call.excinfo.value)), call.when) + call2 = call.__class__( + lambda: runner.skip(str(call.excinfo.value)), call.when) call.excinfo = call2.excinfo -@pytest.hookimpl(trylast=True) +@hookimpl(trylast=True) def pytest_runtest_setup(item): if is_potential_nosetest(item): - if isinstance(item.parent, pytest.Generator): + if isinstance(item.parent, python.Generator): gen = item.parent if not hasattr(gen, '_nosegensetup'): call_optional(gen.obj, 'setup') - if isinstance(gen.parent, pytest.Instance): + if isinstance(gen.parent, python.Instance): call_optional(gen.parent.obj, 'setup') gen._nosegensetup = True if not call_optional(item.obj, 'setup'): @@ -50,14 +51,14 @@ def teardown_nose(item): def pytest_make_collect_report(collector): - if isinstance(collector, pytest.Generator): + if isinstance(collector, python.Generator): call_optional(collector.obj, 'setup') def is_potential_nosetest(item): # extra check needed since we do not do nose style setup/teardown # on direct unittest style classes - return isinstance(item, pytest.Function) and \ + return isinstance(item, python.Function) and \ not 
isinstance(item, unittest.TestCaseFunction) diff --git a/third_party/python/pytest/_pytest/pastebin.py b/third_party/python/pytest/_pytest/pastebin.py index 4ec62d0228050..6f3ce8feddd1d 100644 --- a/third_party/python/pytest/_pytest/pastebin.py +++ b/third_party/python/pytest/_pytest/pastebin.py @@ -1,4 +1,6 @@ """ submit failure or test session information to a pastebin service. """ +from __future__ import absolute_import, division, print_function + import pytest import sys import tempfile @@ -11,6 +13,7 @@ def pytest_addoption(parser): choices=['failed', 'all'], help="send failed|all info to bpaste.net pastebin service.") + @pytest.hookimpl(trylast=True) def pytest_configure(config): import py @@ -23,13 +26,16 @@ def pytest_configure(config): # pastebin file will be utf-8 encoded binary file config._pastebinfile = tempfile.TemporaryFile('w+b') oldwrite = tr._tw.write + def tee_write(s, **kwargs): oldwrite(s, **kwargs) if py.builtin._istext(s): s = s.encode('utf-8') config._pastebinfile.write(s) + tr._tw.write = tee_write + def pytest_unconfigure(config): if hasattr(config, '_pastebinfile'): # get terminal contents and delete file @@ -45,6 +51,7 @@ def pytest_unconfigure(config): pastebinurl = create_new_paste(sessionlog) tr.write_line("pastebin session-log: %s\n" % pastebinurl) + def create_new_paste(contents): """ Creates a new paste using bpaste.net service. @@ -72,6 +79,7 @@ def create_new_paste(contents): else: return 'bad response: ' + response + def pytest_terminal_summary(terminalreporter): import _pytest.config if terminalreporter.config.option.pastebin != "failed": diff --git a/third_party/python/pytest/_pytest/pytester.py b/third_party/python/pytest/_pytest/pytester.py index faed7f581c9d0..901caa340b865 100644 --- a/third_party/python/pytest/_pytest/pytester.py +++ b/third_party/python/pytest/_pytest/pytester.py @@ -1,4 +1,6 @@ """ (disabled by default) support for testing pytest and pytest plugins. 
""" +from __future__ import absolute_import, division, print_function + import codecs import gc import os @@ -10,12 +12,14 @@ import traceback from fnmatch import fnmatch -from py.builtin import print_ +from weakref import WeakKeyDictionary +from _pytest.capture import MultiCapture, SysCapture from _pytest._code import Source import py import pytest from _pytest.main import Session, EXIT_OK +from _pytest.assertion.rewrite import AssertionRewritingHook def pytest_addoption(parser): @@ -84,7 +88,7 @@ def matching_platform(self): return True @pytest.hookimpl(hookwrapper=True, tryfirst=True) - def pytest_runtest_item(self, item): + def pytest_runtest_protocol(self, item): lines1 = self.get_open_files() yield if hasattr(sys, "pypy_version_info"): @@ -103,7 +107,8 @@ def pytest_runtest_item(self, item): error.extend([str(f) for f in lines2]) error.append(error[0]) error.append("*** function %s:%s: %s " % item.location) - pytest.fail("\n".join(error), pytrace=False) + error.append("See issue #2366") + item.warn('', "\n".join(error)) # XXX copied from execnet's conftest.py - needs to be merged @@ -123,15 +128,18 @@ def getexecutable(name, cache={}): except KeyError: executable = py.path.local.sysfind(name) if executable: + import subprocess + popen = subprocess.Popen([str(executable), "--version"], + universal_newlines=True, stderr=subprocess.PIPE) + out, err = popen.communicate() if name == "jython": - import subprocess - popen = subprocess.Popen([str(executable), "--version"], - universal_newlines=True, stderr=subprocess.PIPE) - out, err = popen.communicate() if not err or "2.5" not in err: executable = None if "2.5.2" in err: executable = None # http://bugs.jython.org/issue1790 + elif popen.returncode != 0: + # Handle pyenv's 127. 
+ executable = None cache[name] = executable return executable @@ -222,15 +230,15 @@ def assert_contains(self, entries): name, check = entries.pop(0) for ind, call in enumerate(self.calls[i:]): if call._name == name: - print_("NAMEMATCH", name, call) + print("NAMEMATCH", name, call) if eval(check, backlocals, call.__dict__): - print_("CHECKERMATCH", repr(check), "->", call) + print("CHECKERMATCH", repr(check), "->", call) else: - print_("NOCHECKERMATCH", repr(check), "-", call) + print("NOCHECKERMATCH", repr(check), "-", call) continue i += ind + 1 break - print_("NONAMEMATCH", name, "with", call) + print("NONAMEMATCH", name, "with", call) else: pytest.fail("could not find %r check %r" % (name, check)) @@ -318,7 +326,8 @@ def linecomp(request): return LineComp() -def pytest_funcarg__LineMatcher(request): +@pytest.fixture(name='LineMatcher') +def LineMatcher_fixture(request): return LineMatcher @@ -327,7 +336,7 @@ def testdir(request, tmpdir_factory): return Testdir(request, tmpdir_factory) -rex_outcome = re.compile("(\d+) ([\w-]+)") +rex_outcome = re.compile(r"(\d+) ([\w-]+)") class RunResult: """The result of running a command. @@ -362,6 +371,7 @@ def parseoutcomes(self): for num, cat in outcomes: d[cat] = int(num) return d + raise ValueError("Pytest terminal report not found") def assert_outcomes(self, passed=0, skipped=0, failed=0): """ assert that the specified outcomes appear with the respective @@ -374,10 +384,10 @@ def assert_outcomes(self, passed=0, skipped=0, failed=0): class Testdir: - """Temporary test directory with tools to test/run py.test itself. + """Temporary test directory with tools to test/run pytest itself. This is based on the ``tmpdir`` fixture but provides a number of - methods which aid with testing py.test itself. Unless + methods which aid with testing pytest itself. Unless :py:meth:`chdir` is used all methods will use :py:attr:`tmpdir` as current working directory. 
@@ -396,6 +406,7 @@ class Testdir: def __init__(self, request, tmpdir_factory): self.request = request + self._mod_collections = WeakKeyDictionary() # XXX remove duplication with tmpdir plugin basetmp = tmpdir_factory.ensuretemp("testdir") name = request.function.__name__ @@ -441,9 +452,10 @@ def delete_loaded_modules(self): the module is re-imported. """ for name in set(sys.modules).difference(self._savemodulekeys): - # it seems zope.interfaces is keeping some state - # (used by twisted related tests) - if name != "zope.interface": + # some zope modules used by twisted-related tests keeps internal + # state and can't be deleted; we had some trouble in the past + # with zope.interface for example + if not name.startswith("zope"): del sys.modules[name] def make_hook_recorder(self, pluginmanager): @@ -463,7 +475,7 @@ def chdir(self): if not hasattr(self, '_olddir'): self._olddir = old - def _makefile(self, ext, args, kwargs): + def _makefile(self, ext, args, kwargs, encoding="utf-8"): items = list(kwargs.items()) if args: source = py.builtin._totext("\n").join( @@ -473,14 +485,17 @@ def _makefile(self, ext, args, kwargs): ret = None for name, value in items: p = self.tmpdir.join(name).new(ext=ext) + p.dirpath().ensure_dir() source = Source(value) + def my_totext(s, encoding="utf-8"): if py.builtin._isbytes(s): s = py.builtin._totext(s, encoding=encoding) return s + source_unicode = "\n".join([my_totext(line) for line in source.lines]) source = py.builtin._totext(source_unicode) - content = source.strip().encode("utf-8") # + "\n" + content = source.strip().encode(encoding) # + "\n" #content = content.rstrip() + "\n" p.write(content, "wb") if ret is None: @@ -557,7 +572,7 @@ def mkdir(self, name): def mkpydir(self, name): """Create a new python package. - This creates a (sub)direcotry with an empty ``__init__.py`` + This creates a (sub)directory with an empty ``__init__.py`` file so that is recognised as a python package. 
""" @@ -588,7 +603,7 @@ def getpathnode(self, path): """Return the collection node of a file. This is like :py:meth:`getnode` but uses - :py:meth:`parseconfigure` to create the (configured) py.test + :py:meth:`parseconfigure` to create the (configured) pytest Config instance. :param path: A :py:class:`py.path.local` instance of the file. @@ -652,11 +667,11 @@ def inline_runsource(self, source, *cmdlineargs): def inline_genitems(self, *args): """Run ``pytest.main(['--collectonly'])`` in-process. - Retuns a tuple of the collected items and a + Returns a tuple of the collected items and a :py:class:`HookRecorder` instance. This runs the :py:func:`pytest.main` function to run all of - py.test inside the test process itself like + pytest inside the test process itself like :py:meth:`inline_run`. However the return value is a tuple of the collection items and a :py:class:`HookRecorder` instance. @@ -669,7 +684,7 @@ def inline_run(self, *args, **kwargs): """Run ``pytest.main()`` in-process, returning a HookRecorder. This runs the :py:func:`pytest.main` function to run all of - py.test inside the test process itself. This means it can + pytest inside the test process itself. This means it can return a :py:class:`HookRecorder` instance which gives more detailed results from then run then can be done by matching stdout/stderr from :py:meth:`runpytest`. @@ -681,9 +696,21 @@ def inline_run(self, *args, **kwargs): ``pytest.main()`` instance should use. :return: A :py:class:`HookRecorder` instance. - """ + # When running py.test inline any plugins active in the main + # test process are already imported. So this disables the + # warning which will trigger to say they can no longer be + # re-written, which is fine as they are already re-written. 
+ orig_warn = AssertionRewritingHook._warn_already_imported + + def revert(): + AssertionRewritingHook._warn_already_imported = orig_warn + + self.request.addfinalizer(revert) + AssertionRewritingHook._warn_already_imported = lambda *a: None + rec = [] + class Collect: def pytest_configure(x, config): rec.append(self.make_hook_recorder(config.pluginmanager)) @@ -713,19 +740,24 @@ def runpytest_inprocess(self, *args, **kwargs): if kwargs.get("syspathinsert"): self.syspathinsert() now = time.time() - capture = py.io.StdCapture() + capture = MultiCapture(Capture=SysCapture) + capture.start_capturing() try: try: reprec = self.inline_run(*args, **kwargs) except SystemExit as e: + class reprec: ret = e.args[0] + except Exception: traceback.print_exc() + class reprec: ret = 3 finally: - out, err = capture.reset() + out, err = capture.readouterr() + capture.stop_capturing() sys.stdout.write(out) sys.stderr.write(err) @@ -755,9 +787,9 @@ def _ensure_basetemp(self, args): return args def parseconfig(self, *args): - """Return a new py.test Config instance from given commandline args. + """Return a new pytest Config instance from given commandline args. - This invokes the py.test bootstrapping code in _pytest.config + This invokes the pytest bootstrapping code in _pytest.config to create a new :py:class:`_pytest.core.PluginManager` and call the pytest_cmdline_parse hook to create new :py:class:`_pytest.config.Config` instance. @@ -777,7 +809,7 @@ def parseconfig(self, *args): return config def parseconfigure(self, *args): - """Return a new py.test configured Config instance. + """Return a new pytest configured Config instance. This returns a new :py:class:`_pytest.config.Config` instance like :py:meth:`parseconfig`, but also calls the @@ -792,7 +824,7 @@ def parseconfigure(self, *args): def getitem(self, source, funcname="test_func"): """Return the test item for a test function. 
- This writes the source to a python file and runs py.test's + This writes the source to a python file and runs pytest's collection on the resulting module, returning the test item for the requested function name. @@ -812,7 +844,7 @@ def getitem(self, source, funcname="test_func"): def getitems(self, source): """Return all test items collected from the module. - This writes the source to a python file and runs py.test's + This writes the source to a python file and runs pytest's collection on the resulting module, returning all test items contained within. @@ -824,7 +856,7 @@ def getmodulecol(self, source, configargs=(), withinit=False): """Return the module collection node for ``source``. This writes ``source`` to a file using :py:meth:`makepyfile` - and then runs the py.test collection on it, returning the + and then runs the pytest collection on it, returning the collection node for the test module. :param source: The source code of the module to collect. @@ -833,7 +865,7 @@ def getmodulecol(self, source, configargs=(), withinit=False): :py:meth:`parseconfigure`. :param withinit: Whether to also write a ``__init__.py`` file - to the temporarly directory to ensure it is a package. + to the temporary directory to ensure it is a package. """ kw = {self.request.function.__name__: Source(source).strip()} @@ -842,6 +874,7 @@ def getmodulecol(self, source, configargs=(), withinit=False): self.makepyfile(__init__ = "#") self.config = config = self.parseconfigure(path, *configargs) node = self.getnode(config, path) + return node def collect_by_name(self, modcol, name): @@ -856,7 +889,9 @@ def collect_by_name(self, modcol, name): :param name: The name of the node to return. 
""" - for colitem in modcol._memocollect(): + if modcol not in self._mod_collections: + self._mod_collections[modcol] = list(modcol.collect()) + for colitem in self._mod_collections[modcol]: if colitem.name == name: return colitem @@ -891,8 +926,8 @@ def _run(self, *cmdargs): cmdargs = [str(x) for x in cmdargs] p1 = self.tmpdir.join("stdout") p2 = self.tmpdir.join("stderr") - print_("running:", ' '.join(cmdargs)) - print_(" in:", str(py.path.local())) + print("running:", ' '.join(cmdargs)) + print(" in:", str(py.path.local())) f1 = codecs.open(str(p1), "w", encoding="utf8") f2 = codecs.open(str(p2), "w", encoding="utf8") try: @@ -918,13 +953,13 @@ def _run(self, *cmdargs): def _dump_lines(self, lines, fp): try: for line in lines: - py.builtin.print_(line, file=fp) + print(line, file=fp) except UnicodeEncodeError: print("couldn't print to %s because of encoding" % (fp,)) def _getpytestargs(self): # we cannot use "(sys.executable,script)" - # because on windows the script is e.g. a py.test.exe + # because on windows the script is e.g. a pytest.exe return (sys.executable, _pytest_fullpath,) # noqa def runpython(self, script): @@ -939,7 +974,7 @@ def runpython_c(self, command): return self.run(sys.executable, "-c", command) def runpytest_subprocess(self, *args, **kwargs): - """Run py.test as a subprocess with given arguments. + """Run pytest as a subprocess with given arguments. Any plugins added to the :py:attr:`plugins` list will added using the ``-p`` command line option. Addtionally @@ -967,15 +1002,15 @@ def runpytest_subprocess(self, *args, **kwargs): return self.run(*args) def spawn_pytest(self, string, expect_timeout=10.0): - """Run py.test using pexpect. + """Run pytest using pexpect. - This makes sure to use the right py.test and sets up the + This makes sure to use the right pytest and sets up the temporary directory locations. The pexpect child is returned. 
""" - basetemp = self.tmpdir.mkdir("pexpect") + basetemp = self.tmpdir.mkdir("temp-pexpect") invoke = " ".join(map(str, self._getpytestargs())) cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string) return self.spawn(cmd, expect_timeout=expect_timeout) @@ -988,8 +1023,6 @@ def spawn(self, cmd, expect_timeout=10.0): pexpect = pytest.importorskip("pexpect", "3.0") if hasattr(sys, 'pypy_version_info') and '64' in platform.machine(): pytest.skip("pypy-64 bit not supported") - if sys.platform == "darwin": - pytest.xfail("pexpect does not work reliably on darwin?!") if sys.platform.startswith("freebsd"): pytest.xfail("pexpect does not work reliably on freebsd") logfile = self.tmpdir.join("spawn.out").open("wb") @@ -1035,6 +1068,7 @@ class LineMatcher: def __init__(self, lines): self.lines = lines + self._log_output = [] def str(self): """Return the entire original text.""" @@ -1058,10 +1092,11 @@ def fnmatch_lines_random(self, lines2): for line in lines2: for x in self.lines: if line == x or fnmatch(x, line): - print_("matched: ", repr(line)) + self._log("matched: ", repr(line)) break else: - raise ValueError("line %r not found in output" % line) + self._log("line %r not found in output" % line) + raise ValueError(self._log_text) def get_lines_after(self, fnline): """Return all lines following the given line in the text. @@ -1073,6 +1108,13 @@ def get_lines_after(self, fnline): return self.lines[i+1:] raise ValueError("line %r not found in output" % fnline) + def _log(self, *args): + self._log_output.append(' '.join((str(x) for x in args))) + + @property + def _log_text(self): + return '\n'.join(self._log_output) + def fnmatch_lines(self, lines2): """Search the text for matching lines. @@ -1082,8 +1124,6 @@ def fnmatch_lines(self, lines2): stdout. 
""" - def show(arg1, arg2): - py.builtin.print_(arg1, arg2, file=sys.stderr) lines2 = self._getlines(lines2) lines1 = self.lines[:] nextline = None @@ -1094,17 +1134,18 @@ def show(arg1, arg2): while lines1: nextline = lines1.pop(0) if line == nextline: - show("exact match:", repr(line)) + self._log("exact match:", repr(line)) break elif fnmatch(nextline, line): - show("fnmatch:", repr(line)) - show(" with:", repr(nextline)) + self._log("fnmatch:", repr(line)) + self._log(" with:", repr(nextline)) break else: if not nomatchprinted: - show("nomatch:", repr(line)) + self._log("nomatch:", repr(line)) nomatchprinted = True - show(" and:", repr(nextline)) + self._log(" and:", repr(nextline)) extralines.append(nextline) else: - pytest.fail("remains unmatched: %r, see stderr" % (line,)) + self._log("remains unmatched: %r" % (line,)) + pytest.fail(self._log_text) diff --git a/third_party/python/pytest/_pytest/python.py b/third_party/python/pytest/_pytest/python.py index 21d78aea3375f..06f74ce4b8ad7 100644 --- a/third_party/python/pytest/_pytest/python.py +++ b/third_party/python/pytest/_pytest/python.py @@ -1,64 +1,40 @@ """ Python test discovery, setup and run of test functions. 
""" +from __future__ import absolute_import, division, print_function + import fnmatch -import functools import inspect -import re -import types import sys +import os +import collections +import math +from itertools import count import py -import pytest -from _pytest._code.code import TerminalRepr -from _pytest.mark import MarkDecorator, MarkerError - -try: - import enum -except ImportError: # pragma: no cover - # Only available in Python 3.4+ or as a backport - enum = None +from _pytest.mark import MarkerError +from _pytest.config import hookimpl import _pytest import _pytest._pluggy as pluggy +from _pytest import fixtures +from _pytest import main +from _pytest.compat import ( + isclass, isfunction, is_generator, _escape_strings, + REGEX_TYPE, STRING_TYPES, NoneType, NOTSET, + get_real_func, getfslineno, safe_getattr, + safe_str, getlocation, enum, +) +from _pytest.runner import fail -cutdir2 = py.path.local(_pytest.__file__).dirpath() cutdir1 = py.path.local(pluggy.__file__.rstrip("oc")) - - -NoneType = type(None) -NOTSET = object() -isfunction = inspect.isfunction -isclass = inspect.isclass -callable = py.builtin.callable -# used to work around a python2 exception info leak -exc_clear = getattr(sys, 'exc_clear', lambda: None) -# The type of re.compile objects is not exposed in Python. -REGEX_TYPE = type(re.compile('')) - -_PY3 = sys.version_info > (3, 0) -_PY2 = not _PY3 - - -if hasattr(inspect, 'signature'): - def _format_args(func): - return str(inspect.signature(func)) -else: - def _format_args(func): - return inspect.formatargspec(*inspect.getargspec(func)) - -if sys.version_info[:2] == (2, 6): - def isclass(object): - """ Return true if the object is a class. Overrides inspect.isclass for - python 2.6 because it will return True for objects which always return - something on __getattr__ calls (see #1035). 
- Backport of https://hg.python.org/cpython/rev/35bf8f7a8edc - """ - return isinstance(object, (type, types.ClassType)) - -def _has_positional_arg(func): - return func.__code__.co_argcount +cutdir2 = py.path.local(_pytest.__file__).dirpath() +cutdir3 = py.path.local(py.__file__).dirpath() def filter_traceback(entry): + """Return True if a TracebackEntry instance should be removed from tracebacks: + * dynamically generated code (no code to show up for it); + * internal traceback from pytest or its internal libraries, py and pluggy. + """ # entry.path might sometimes return a str object when the entry # points to dynamically generated code # see https://bitbucket.org/pytest-dev/py/issues/71 @@ -69,123 +45,13 @@ def filter_traceback(entry): # entry.path might point to an inexisting file, in which case it will # alsso return a str object. see #1133 p = py.path.local(entry.path) - return p != cutdir1 and not p.relto(cutdir2) + return p != cutdir1 and not p.relto(cutdir2) and not p.relto(cutdir3) -def get_real_func(obj): - """ gets the real function object of the (possibly) wrapped object by - functools.wraps or functools.partial. - """ - while hasattr(obj, "__wrapped__"): - obj = obj.__wrapped__ - if isinstance(obj, functools.partial): - obj = obj.func - return obj - -def getfslineno(obj): - # xxx let decorators etc specify a sane ordering - obj = get_real_func(obj) - if hasattr(obj, 'place_as'): - obj = obj.place_as - fslineno = _pytest._code.getfslineno(obj) - assert isinstance(fslineno[1], int), obj - return fslineno - -def getimfunc(func): - try: - return func.__func__ - except AttributeError: - try: - return func.im_func - except AttributeError: - return func - -def safe_getattr(object, name, default): - """ Like getattr but return default upon any Exception. - - Attribute access can potentially fail for 'evil' Python objects. 
- See issue214 - """ - try: - return getattr(object, name, default) - except Exception: - return default - - -class FixtureFunctionMarker: - def __init__(self, scope, params, - autouse=False, yieldctx=False, ids=None): - self.scope = scope - self.params = params - self.autouse = autouse - self.yieldctx = yieldctx - self.ids = ids - - def __call__(self, function): - if isclass(function): - raise ValueError( - "class fixtures not supported (may be in the future)") - function._pytestfixturefunction = self - return function - - -def fixture(scope="function", params=None, autouse=False, ids=None): - """ (return a) decorator to mark a fixture factory function. - - This decorator can be used (with or or without parameters) to define - a fixture function. The name of the fixture function can later be - referenced to cause its invocation ahead of running tests: test - modules or classes can use the pytest.mark.usefixtures(fixturename) - marker. Test functions can directly use fixture names as input - arguments in which case the fixture instance returned from the fixture - function will be injected. - - :arg scope: the scope for which this fixture is shared, one of - "function" (default), "class", "module", "session". - - :arg params: an optional list of parameters which will cause multiple - invocations of the fixture function and all of the tests - using it. - - :arg autouse: if True, the fixture func is activated for all tests that - can see it. If False (the default) then an explicit - reference is needed to activate the fixture. - - :arg ids: list of string ids each corresponding to the params - so that they are part of the test id. If no ids are provided - they will be generated automatically from the params. 
- - """ - if callable(scope) and params is None and autouse == False: - # direct decoration - return FixtureFunctionMarker( - "function", params, autouse)(scope) - if params is not None and not isinstance(params, (list, tuple)): - params = list(params) - return FixtureFunctionMarker(scope, params, autouse, ids=ids) - -def yield_fixture(scope="function", params=None, autouse=False, ids=None): - """ (return a) decorator to mark a yield-fixture factory function - (EXPERIMENTAL). - - This takes the same arguments as :py:func:`pytest.fixture` but - expects a fixture function to use a ``yield`` instead of a ``return`` - statement to provide a fixture. See - http://pytest.org/en/latest/yieldfixture.html for more info. - """ - if callable(scope) and params is None and autouse == False: - # direct decoration - return FixtureFunctionMarker( - "function", params, autouse, yieldctx=True)(scope) - else: - return FixtureFunctionMarker(scope, params, autouse, - yieldctx=True, ids=ids) - -defaultfuncargprefixmarker = fixture() def pyobj_property(name): def get(self): - node = self.getparent(getattr(pytest, name)) + node = self.getparent(getattr(__import__('pytest'), name)) if node is not None: return node.obj doc = "python %s object this node was collected from (can be None)." 
% ( @@ -198,6 +64,13 @@ def pytest_addoption(parser): group.addoption('--fixtures', '--funcargs', action="store_true", dest="showfixtures", default=False, help="show available fixtures, sorted by plugin appearance") + group.addoption( + '--fixtures-per-test', + action="store_true", + dest="show_fixtures_per_test", + default=False, + help="show fixtures per test", + ) parser.addini("usefixtures", type="args", default=[], help="list of default fixtures to be used with this project") parser.addini("python_files", type="args", @@ -219,6 +92,9 @@ def pytest_cmdline_main(config): if config.option.showfixtures: showfixtures(config) return 0 + if config.option.show_fixtures_per_test: + show_fixtures_per_test(config) + return 0 def pytest_generate_tests(metafunc): @@ -252,29 +128,8 @@ def pytest_configure(config): "all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures " ) -def pytest_sessionstart(session): - session._fixturemanager = FixtureManager(session) - -@pytest.hookimpl(trylast=True) -def pytest_namespace(): - raises.Exception = pytest.fail.Exception - return { - 'fixture': fixture, - 'yield_fixture': yield_fixture, - 'raises' : raises, - 'collect': { - 'Module': Module, 'Class': Class, 'Instance': Instance, - 'Function': Function, 'Generator': Generator, - '_fillfuncargs': fillfixtures} - } - -@fixture(scope="session") -def pytestconfig(request): - """ the pytest config object with access to command line opts.""" - return request.config - - -@pytest.hookimpl(trylast=True) + +@hookimpl(trylast=True) def pytest_pyfunc_call(pyfuncitem): testfunction = pyfuncitem.obj if pyfuncitem._isyieldedfunction(): @@ -287,6 +142,7 @@ def pytest_pyfunc_call(pyfuncitem): testfunction(**testargs) return True + def pytest_collect_file(path, parent): ext = path.ext if ext == ".py": @@ -302,12 +158,12 @@ def pytest_collect_file(path, parent): def pytest_pycollect_makemodule(path, parent): return Module(path, parent) -@pytest.hookimpl(hookwrapper=True) 
+@hookimpl(hookwrapper=True) def pytest_pycollect_makeitem(collector, name, obj): outcome = yield res = outcome.get_result() if res is not None: - raise StopIteration + return # nothing was collected elsewhere, let's do it here if isclass(obj): if collector.istestclass(obj, name): @@ -330,12 +186,10 @@ def pytest_pycollect_makeitem(collector, name, obj): res = list(collector._genfunctions(name, obj)) outcome.force_result(res) -def is_generator(func): - try: - return _pytest._code.getrawcode(func).co_flags & 32 # generator function - except AttributeError: # builtin functions have no bytecode - # assume them to not be generators - return False +def pytest_make_parametrize_id(config, val, argname=None): + return None + + class PyobjContext(object): module = pyobj_property("Module") @@ -345,14 +199,16 @@ class PyobjContext(object): class PyobjMixin(PyobjContext): def obj(): def fget(self): - try: - return self._obj - except AttributeError: + obj = getattr(self, '_obj', None) + if obj is None: self._obj = obj = self._getobj() - return obj + return obj + def fset(self, value): self._obj = value + return property(fget, fset, None, "underlying python object") + obj = obj() def _getobj(self): @@ -368,8 +224,7 @@ def getmodpath(self, stopatmodule=True, includemodule=False): continue name = node.name if isinstance(node, Module): - assert name.endswith(".py") - name = name[:-3] + name = os.path.splitext(name)[0] if stopatmodule: if includemodule: parts.append(name) @@ -398,7 +253,7 @@ def reportinfo(self): assert isinstance(lineno, int) return fspath, lineno, modpath -class PyCollector(PyobjMixin, pytest.Collector): +class PyCollector(PyobjMixin, main.Collector): def funcnamefilter(self, name): return self._matches_prefix_or_glob_option('python_functions', name) @@ -418,7 +273,7 @@ def classnamefilter(self, name): def istestfunction(self, obj, name): return ( (self.funcnamefilter(name) or self.isnosetest(obj)) and - safe_getattr(obj, "__call__", False) and 
getfixturemarker(obj) is None + safe_getattr(obj, "__call__", False) and fixtures.getfixturemarker(obj) is None ) def istestclass(self, obj, name): @@ -495,76 +350,16 @@ def _genfunctions(self, name, funcobj): yield Function(name, parent=self, fixtureinfo=fixtureinfo) else: # add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs - add_funcarg_pseudo_fixture_def(self, metafunc, fm) + fixtures.add_funcarg_pseudo_fixture_def(self, metafunc, fm) for callspec in metafunc._calls: - subname = "%s[%s]" %(name, callspec.id) + subname = "%s[%s]" % (name, callspec.id) yield Function(name=subname, parent=self, callspec=callspec, callobj=funcobj, fixtureinfo=fixtureinfo, - keywords={callspec.id:True}) - -def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager): - # this function will transform all collected calls to a functions - # if they use direct funcargs (i.e. direct parametrization) - # because we want later test execution to be able to rely on - # an existing FixtureDef structure for all arguments. - # XXX we can probably avoid this algorithm if we modify CallSpec2 - # to directly care for creating the fixturedefs within its methods. - if not metafunc._calls[0].funcargs: - return # this function call does not have direct parametrization - # collect funcargs of all callspecs into a list of values - arg2params = {} - arg2scope = {} - for callspec in metafunc._calls: - for argname, argvalue in callspec.funcargs.items(): - assert argname not in callspec.params - callspec.params[argname] = argvalue - arg2params_list = arg2params.setdefault(argname, []) - callspec.indices[argname] = len(arg2params_list) - arg2params_list.append(argvalue) - if argname not in arg2scope: - scopenum = callspec._arg2scopenum.get(argname, - scopenum_function) - arg2scope[argname] = scopes[scopenum] - callspec.funcargs.clear() - - # register artificial FixtureDef's so that later at test execution - # time we can rely on a proper FixtureDef to exist for fixture setup. 
- arg2fixturedefs = metafunc._arg2fixturedefs - for argname, valuelist in arg2params.items(): - # if we have a scope that is higher than function we need - # to make sure we only ever create an according fixturedef on - # a per-scope basis. We thus store and cache the fixturedef on the - # node related to the scope. - scope = arg2scope[argname] - node = None - if scope != "function": - node = get_scope_node(collector, scope) - if node is None: - assert scope == "class" and isinstance(collector, Module) - # use module-level collector for class-scope (for now) - node = collector - if node and argname in node._name2pseudofixturedef: - arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]] - else: - fixturedef = FixtureDef(fixturemanager, '', argname, - get_direct_param_fixture_func, - arg2scope[argname], - valuelist, False, False) - arg2fixturedefs[argname] = [fixturedef] - if node is not None: - node._name2pseudofixturedef[argname] = fixturedef - - -def get_direct_param_fixture_func(request): - return request.param - -class FuncFixtureInfo: - def __init__(self, argnames, names_closure, name2fixturedefs): - self.argnames = argnames - self.names_closure = names_closure - self.name2fixturedefs = name2fixturedefs + keywords={callspec.id:True}, + originalname=name, + ) def _marked(func, mark): @@ -595,10 +390,12 @@ def transfer_markers(funcobj, cls, mod): if not _marked(funcobj, pytestmark): pytestmark(funcobj) -class Module(pytest.File, PyCollector): + +class Module(main.File, PyCollector): """ Collector for test classes and functions. 
""" + def _getobj(self): - return self._memoizedcall('_obj', self._importtestmodule) + return self._importtestmodule() def collect(self): self.session._fixturemanager.parsefactories(self) @@ -624,47 +421,91 @@ def _importtestmodule(self): "unique basename for your test file modules" % e.args ) - #print "imported test module", mod + except ImportError: + from _pytest._code.code import ExceptionInfo + exc_info = ExceptionInfo() + if self.config.getoption('verbose') < 2: + exc_info.traceback = exc_info.traceback.filter(filter_traceback) + exc_repr = exc_info.getrepr(style='short') if exc_info.traceback else exc_info.exconly() + formatted_tb = safe_str(exc_repr) + raise self.CollectError( + "ImportError while importing test module '{fspath}'.\n" + "Hint: make sure your test modules/packages have valid Python names.\n" + "Traceback:\n" + "{traceback}".format(fspath=self.fspath, traceback=formatted_tb) + ) + except _pytest.runner.Skipped as e: + if e.allow_module_level: + raise + raise self.CollectError( + "Using pytest.skip outside of a test is not allowed. If you are " + "trying to decorate a test function, use the @pytest.mark.skip " + "or @pytest.mark.skipif decorators instead." 
+ ) self.config.pluginmanager.consider_module(mod) return mod def setup(self): - setup_module = xunitsetup(self.obj, "setUpModule") + setup_module = _get_xunit_setup_teardown(self.obj, "setUpModule") if setup_module is None: - setup_module = xunitsetup(self.obj, "setup_module") + setup_module = _get_xunit_setup_teardown(self.obj, "setup_module") if setup_module is not None: - #XXX: nose compat hack, move to nose plugin - # if it takes a positional arg, its probably a pytest style one - # so we pass the current module object - if _has_positional_arg(setup_module): - setup_module(self.obj) - else: - setup_module() - fin = getattr(self.obj, 'tearDownModule', None) - if fin is None: - fin = getattr(self.obj, 'teardown_module', None) - if fin is not None: - #XXX: nose compat hack, move to nose plugin - # if it takes a positional arg, it's probably a pytest style one - # so we pass the current module object - if _has_positional_arg(fin): - finalizer = lambda: fin(self.obj) - else: - finalizer = fin - self.addfinalizer(finalizer) + setup_module() + + teardown_module = _get_xunit_setup_teardown(self.obj, 'tearDownModule') + if teardown_module is None: + teardown_module = _get_xunit_setup_teardown(self.obj, 'teardown_module') + if teardown_module is not None: + self.addfinalizer(teardown_module) + + +def _get_xunit_setup_teardown(holder, attr_name, param_obj=None): + """ + Return a callable to perform xunit-style setup or teardown if + the function exists in the ``holder`` object. + The ``param_obj`` parameter is the parameter which will be passed to the function + when the callable is called without arguments, defaults to the ``holder`` object. + Return ``None`` if a suitable callable is not found. 
+ """ + param_obj = param_obj if param_obj is not None else holder + result = _get_xunit_func(holder, attr_name) + if result is not None: + arg_count = result.__code__.co_argcount + if inspect.ismethod(result): + arg_count -= 1 + if arg_count: + return lambda: result(param_obj) + else: + return result + + +def _get_xunit_func(obj, name): + """Return the attribute from the given object to be used as a setup/teardown + xunit-style function, but only if not marked as a fixture to + avoid calling it twice. + """ + meth = getattr(obj, name, None) + if fixtures.getfixturemarker(meth) is None: + return meth class Class(PyCollector): """ Collector for test methods. """ def collect(self): + if not safe_getattr(self.obj, "__test__", True): + return [] if hasinit(self.obj): self.warn("C1", "cannot collect test class %r because it has a " "__init__ constructor" % self.obj.__name__) return [] + elif hasnew(self.obj): + self.warn("C1", "cannot collect test class %r because it has a " + "__new__ constructor" % self.obj.__name__) + return [] return [self._getcustomclass("Instance")(name="()", parent=self)] def setup(self): - setup_class = xunitsetup(self.obj, 'setup_class') + setup_class = _get_xunit_func(self.obj, 'setup_class') if setup_class is not None: setup_class = getattr(setup_class, 'im_func', setup_class) setup_class = getattr(setup_class, '__func__', setup_class) @@ -678,8 +519,7 @@ def setup(self): class Instance(PyCollector): def _getobj(self): - obj = self.parent.obj() - return obj + return self.parent.obj() def collect(self): self.session._fixturemanager.parsefactories(self) @@ -708,12 +548,12 @@ def setup(self): else: setup_name = 'setup_function' teardown_name = 'teardown_function' - setup_func_or_method = xunitsetup(obj, setup_name) + setup_func_or_method = _get_xunit_setup_teardown(obj, setup_name, param_obj=self.obj) if setup_func_or_method is not None: - setup_func_or_method(self.obj) - fin = getattr(obj, teardown_name, None) - if fin is not None: - 
self.addfinalizer(lambda: fin(self.obj)) + setup_func_or_method() + teardown_func_or_method = _get_xunit_setup_teardown(obj, teardown_name, param_obj=self.obj) + if teardown_func_or_method is not None: + self.addfinalizer(teardown_func_or_method) def _prunetraceback(self, excinfo): if hasattr(self, '_obj') and not self.config.option.fulltrace: @@ -738,7 +578,7 @@ def _prunetraceback(self, excinfo): entry.set_repr_style('short') def _repr_failure_py(self, excinfo, style="long"): - if excinfo.errisinstance(pytest.fail.Exception): + if excinfo.errisinstance(fail.Exception): if not excinfo.value.pytrace: return py._builtin._totext(excinfo.value) return super(FunctionMixin, self)._repr_failure_py(excinfo, @@ -757,6 +597,7 @@ def collect(self): # test generators are seen as collectors but they also # invoke setup/teardown on popular request # (induced by the common "test_*" naming shared with normal tests) + from _pytest import deprecated self.session._setupstate.prepare(self) # see FunctionMixin.setup and test_setupstate_is_preserved_134 self._preservedparent = self.parent.obj @@ -774,12 +615,13 @@ def collect(self): raise ValueError("%r generated tests with non-unique name %r" %(self, name)) seen[name] = True l.append(self.Function(name, self, args=args, callobj=call)) + self.config.warn('C1', deprecated.YIELD_TESTS, fslocation=self.fspath) return l def getcallargs(self, obj): if not isinstance(obj, (tuple, list)): obj = (obj,) - # explict naming + # explicit naming if isinstance(obj[0], py.builtin._basestring): name = obj[0] obj = obj[1:] @@ -792,34 +634,14 @@ def getcallargs(self, obj): def hasinit(obj): init = getattr(obj, '__init__', None) if init: - if init != object.__init__: - return True - - + return init != object.__init__ -def fillfixtures(function): - """ fill missing funcargs for a test function. """ - try: - request = function._request - except AttributeError: - # XXX this special code path is only expected to execute - # with the oejskit plugin. 
It uses classes with funcargs - # and we thus have to work a bit to allow this. - fm = function.session._fixturemanager - fi = fm.getfixtureinfo(function.parent, function.obj, None) - function._fixtureinfo = fi - request = function._request = FixtureRequest(function) - request._fillfixtures() - # prune out funcargs for jstests - newfuncargs = {} - for name in fi.argnames: - newfuncargs[name] = function.funcargs[name] - function.funcargs = newfuncargs - else: - request._fillfixtures() +def hasnew(obj): + new = getattr(obj, '__new__', None) + if new: + return new != object.__new__ -_notexists = object() class CallSpec2(object): def __init__(self, metafunc): @@ -827,9 +649,9 @@ def __init__(self, metafunc): self.funcargs = {} self._idlist = [] self.params = {} - self._globalid = _notexists + self._globalid = NOTSET self._globalid_args = set() - self._globalparam = _notexists + self._globalparam = NOTSET self._arg2scopenum = {} # used for sorting parametrized resources self.keywords = {} self.indices = {} @@ -855,7 +677,7 @@ def getparam(self, name): try: return self.params[name] except KeyError: - if self._globalparam is _notexists: + if self._globalparam is NOTSET: raise ValueError(name) return self._globalparam @@ -878,55 +700,41 @@ def setall(self, funcargs, id, param): for x in funcargs: self._checkargnotcontained(x) self.funcargs.update(funcargs) - if id is not _notexists: + if id is not NOTSET: self._idlist.append(id) - if param is not _notexists: - assert self._globalparam is _notexists + if param is not NOTSET: + assert self._globalparam is NOTSET self._globalparam = param for arg in funcargs: - self._arg2scopenum[arg] = scopenum_function + self._arg2scopenum[arg] = fixtures.scopenum_function -class FuncargnamesCompatAttr: - """ helper class so that Metafunc, Function and FixtureRequest - don't need to each define the "funcargnames" compatibility attribute. 
- """ - @property - def funcargnames(self): - """ alias attribute for ``fixturenames`` for pre-2.3 compatibility""" - return self.fixturenames - -class Metafunc(FuncargnamesCompatAttr): +class Metafunc(fixtures.FuncargnamesCompatAttr): """ Metafunc objects are passed to the ``pytest_generate_tests`` hook. They help to inspect a test function and to generate tests according to test configuration or values specified in the class or module where a test function is defined. - - :ivar fixturenames: set of fixture names required by the test function - - :ivar function: underlying python test function - - :ivar cls: class object where the test function is defined in or ``None``. - - :ivar module: the module object where the test function is defined in. - - :ivar config: access to the :class:`_pytest.config.Config` object for the - test session. - - :ivar funcargnames: - .. deprecated:: 2.3 - Use ``fixturenames`` instead. """ def __init__(self, function, fixtureinfo, config, cls=None, module=None): + #: access to the :class:`_pytest.config.Config` object for the test session self.config = config + + #: the module object where the test function is defined in. self.module = module + + #: underlying python test function self.function = function + + #: set of fixture names required by the test function self.fixturenames = fixtureinfo.names_closure - self._arg2fixturedefs = fixtureinfo.name2fixturedefs + + #: class object where the test function is defined in or ``None``. self.cls = cls + self._calls = [] self._ids = py.builtin.set() + self._arg2fixturedefs = fixtureinfo.name2fixturedefs def parametrize(self, argnames, argvalues, indirect=False, ids=None, scope=None): @@ -954,7 +762,8 @@ def parametrize(self, argnames, argvalues, indirect=False, ids=None, :arg ids: list of string ids, or a callable. If strings, each is corresponding to the argvalues so that they are - part of the test id. + part of the test id. 
If None is given as id of specific test, the + automatically generated id for that argument will be used. If callable, it should take one argument (a single argvalue) and return a string or return None. If None, the automatically generated id for that argument will be used. @@ -966,45 +775,45 @@ def parametrize(self, argnames, argvalues, indirect=False, ids=None, It will also override any fixture-function defined scope, allowing to set a dynamic scope using test context or configuration. """ - - # individual parametrized argument sets can be wrapped in a series - # of markers in which case we unwrap the values and apply the mark - # at Function init - newkeywords = {} - unwrapped_argvalues = [] - for i, argval in enumerate(argvalues): - while isinstance(argval, MarkDecorator): - newmark = MarkDecorator(argval.markname, - argval.args[:-1], argval.kwargs) - newmarks = newkeywords.setdefault(i, {}) - newmarks[newmark.markname] = newmark - argval = argval.args[-1] - unwrapped_argvalues.append(argval) - argvalues = unwrapped_argvalues + from _pytest.fixtures import scope2index + from _pytest.mark import MARK_GEN, ParameterSet + from py.io import saferepr if not isinstance(argnames, (tuple, list)): argnames = [x.strip() for x in argnames.split(",") if x.strip()] - if len(argnames) == 1: - argvalues = [(val,) for val in argvalues] - if not argvalues: - argvalues = [(_notexists,) * len(argnames)] - # we passed a empty list to parameterize, skip that test - # - fs, lineno = getfslineno(self.function) - newmark = pytest.mark.skip( - reason="got empty parameter set %r, function %s at %s:%d" % ( - argnames, self.function.__name__, fs, lineno)) - newmarks = newkeywords.setdefault(0, {}) - newmarks[newmark.markname] = newmark + force_tuple = len(argnames) == 1 + else: + force_tuple = False + parameters = [ + ParameterSet.extract_from(x, legacy_force_tuple=force_tuple) + for x in argvalues] + del argvalues + if not parameters: + fs, lineno = getfslineno(self.function) + reason = 
"got empty parameter set %r, function %s at %s:%d" % ( + argnames, self.function.__name__, fs, lineno) + mark = MARK_GEN.skip(reason=reason) + parameters.append(ParameterSet( + values=(NOTSET,) * len(argnames), + marks=[mark], + id=None, + )) if scope is None: - scope = "function" - scopenum = scopes.index(scope) + scope = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect) + + scopenum = scope2index(scope, descr='call to {0}'.format(self.parametrize)) valtypes = {} for arg in argnames: if arg not in self.fixturenames: - raise ValueError("%r uses no fixture %r" %(self.function, arg)) + if isinstance(indirect, (tuple, list)): + name = 'fixture' if arg in indirect else 'argument' + else: + name = 'fixture' if indirect else 'argument' + raise ValueError( + "%r uses no %s %r" % ( + self.function, name, arg)) if indirect is True: valtypes = dict.fromkeys(argnames, "params") @@ -1014,30 +823,38 @@ def parametrize(self, argnames, argvalues, indirect=False, ids=None, valtypes = dict.fromkeys(argnames, "funcargs") for arg in indirect: if arg not in argnames: - raise ValueError("indirect given to %r: fixture %r doesn't exist" %( + raise ValueError("indirect given to %r: fixture %r doesn't exist" % ( self.function, arg)) valtypes[arg] = "params" idfn = None if callable(ids): idfn = ids ids = None - if ids and len(ids) != len(argvalues): - raise ValueError('%d tests specified with %d ids' %( - len(argvalues), len(ids))) - if not ids: - ids = idmaker(argnames, argvalues, idfn) + if ids: + if len(ids) != len(parameters): + raise ValueError('%d tests specified with %d ids' % ( + len(parameters), len(ids))) + for id_value in ids: + if id_value is not None and not isinstance(id_value, py.builtin._basestring): + msg = 'ids must be list of strings, found: %s (type: %s)' + raise ValueError(msg % (saferepr(id_value), type(id_value).__name__)) + ids = idmaker(argnames, parameters, idfn, ids, self.config) newcalls = [] for callspec in self._calls or [CallSpec2(self)]: - 
for param_index, valset in enumerate(argvalues): - assert len(valset) == len(argnames) + elements = zip(ids, parameters, count()) + for a_id, param, param_index in elements: + if len(param.values) != len(argnames): + raise ValueError( + 'In "parametrize" the number of values ({0}) must be ' + 'equal to the number of names ({1})'.format( + param.values, argnames)) newcallspec = callspec.copy(self) - newcallspec.setmulti(valtypes, argnames, valset, ids[param_index], - newkeywords.get(param_index, {}), scopenum, - param_index) + newcallspec.setmulti(valtypes, argnames, param.values, a_id, + param.deprecated_arg_dict, scopenum, param_index) newcalls.append(newcallspec) self._calls = newcalls - def addcall(self, funcargs=None, id=_notexists, param=_notexists): + def addcall(self, funcargs=None, id=NOTSET, param=NOTSET): """ (deprecated, use parametrize) Add a new call to the underlying test function during the collection phase of a test run. Note that request.addcall() is called during the test collection phase prior and @@ -1057,12 +874,12 @@ def addcall(self, funcargs=None, id=_notexists, param=_notexists): if funcargs is not None: for name in funcargs: if name not in self.fixturenames: - pytest.fail("funcarg %r not used in this function." % name) + fail("funcarg %r not used in this function." % name) else: funcargs = {} if id is None: raise ValueError("id=None not allowed") - if id is _notexists: + if id is NOTSET: id = len(self._calls) id = str(id) if id in self._ids: @@ -1074,86 +891,155 @@ def addcall(self, funcargs=None, id=_notexists, param=_notexists): self._calls.append(cs) -if _PY3: - import codecs +def _find_parametrized_scope(argnames, arg2fixturedefs, indirect): + """Find the most appropriate scope for a parametrized call based on its arguments. 
- def _escape_bytes(val): - """ - If val is pure ascii, returns it as a str(), otherwise escapes - into a sequence of escaped bytes: - b'\xc3\xb4\xc5\xd6' -> u'\\xc3\\xb4\\xc5\\xd6' - - note: - the obvious "v.decode('unicode-escape')" will return - valid utf-8 unicode if it finds them in the string, but we - want to return escaped bytes for any byte, even if they match - a utf-8 string. - """ - if val: - # source: http://goo.gl/bGsnwC - encoded_bytes, _ = codecs.escape_encode(val) - return encoded_bytes.decode('ascii') - else: - # empty bytes crashes codecs.escape_encode (#1087) - return '' -else: - def _escape_bytes(val): - """ - In py2 bytes and str are the same type, so return it unchanged if it - is a full ascii string, otherwise escape it into its binary form. - """ - try: - return val.decode('ascii') - except UnicodeDecodeError: - return val.encode('string-escape') + When there's at least one direct argument, always use "function" scope. + When a test function is parametrized and all its arguments are indirect + (e.g. fixtures), return the most narrow scope based on the fixtures used. -def _idval(val, argname, idx, idfn): + Related to issue #1832, based on code posted by @Kingdread. 
+ """ + from _pytest.fixtures import scopes + indirect_as_list = isinstance(indirect, (list, tuple)) + all_arguments_are_fixtures = indirect is True or \ + indirect_as_list and len(indirect) == argnames + if all_arguments_are_fixtures: + fixturedefs = arg2fixturedefs or {} + used_scopes = [fixturedef[0].scope for name, fixturedef in fixturedefs.items()] + if used_scopes: + # Takes the most narrow scope from used fixtures + for scope in reversed(scopes): + if scope in used_scopes: + return scope + + return 'function' + + +def _idval(val, argname, idx, idfn, config=None): if idfn: + s = None try: s = idfn(val) - if s: - return s except Exception: - pass - - if isinstance(val, bytes): - return _escape_bytes(val) - elif isinstance(val, (float, int, str, bool, NoneType)): + # See issue https://github.com/pytest-dev/pytest/issues/2169 + import warnings + msg = "Raised while trying to determine id of parameter %s at position %d." % (argname, idx) + msg += '\nUpdate your code as this will raise an error in pytest-4.0.' 
+ warnings.warn(msg, DeprecationWarning) + if s: + return _escape_strings(s) + + if config: + hook_id = config.hook.pytest_make_parametrize_id( + config=config, val=val, argname=argname) + if hook_id: + return hook_id + + if isinstance(val, STRING_TYPES): + return _escape_strings(val) + elif isinstance(val, (float, int, bool, NoneType)): return str(val) elif isinstance(val, REGEX_TYPE): - return _escape_bytes(val.pattern) if isinstance(val.pattern, bytes) else val.pattern + return _escape_strings(val.pattern) elif enum is not None and isinstance(val, enum.Enum): return str(val) elif isclass(val) and hasattr(val, '__name__'): return val.__name__ - elif _PY2 and isinstance(val, unicode): - # special case for python 2: if a unicode string is - # convertible to ascii, return it as an str() object instead - try: - return str(val) - except UnicodeError: - # fallthrough - pass return str(argname)+str(idx) -def _idvalset(idx, valset, argnames, idfn): - this_id = [_idval(val, argname, idx, idfn) - for val, argname in zip(valset, argnames)] - return "-".join(this_id) - -def idmaker(argnames, argvalues, idfn=None): - ids = [_idvalset(valindex, valset, argnames, idfn) - for valindex, valset in enumerate(argvalues)] - if len(set(ids)) < len(ids): - # user may have provided a bad idfn which means the ids are not unique - ids = [str(i) + testid for i, testid in enumerate(ids)] + +def _idvalset(idx, parameterset, argnames, idfn, ids, config=None): + if parameterset.id is not None: + return parameterset.id + if ids is None or (idx >= len(ids) or ids[idx] is None): + this_id = [_idval(val, argname, idx, idfn, config) + for val, argname in zip(parameterset.values, argnames)] + return "-".join(this_id) + else: + return _escape_strings(ids[idx]) + + +def idmaker(argnames, parametersets, idfn=None, ids=None, config=None): + ids = [_idvalset(valindex, parameterset, argnames, idfn, ids, config) + for valindex, parameterset in enumerate(parametersets)] + if len(set(ids)) != len(ids): + # 
The ids are not unique + duplicates = [testid for testid in ids if ids.count(testid) > 1] + counters = collections.defaultdict(lambda: 0) + for index, testid in enumerate(ids): + if testid in duplicates: + ids[index] = testid + str(counters[testid]) + counters[testid] += 1 return ids + +def show_fixtures_per_test(config): + from _pytest.main import wrap_session + return wrap_session(config, _show_fixtures_per_test) + + +def _show_fixtures_per_test(config, session): + import _pytest.config + session.perform_collect() + curdir = py.path.local() + tw = _pytest.config.create_terminal_writer(config) + verbose = config.getvalue("verbose") + + def get_best_rel(func): + loc = getlocation(func, curdir) + return curdir.bestrelpath(loc) + + def write_fixture(fixture_def): + argname = fixture_def.argname + + if verbose <= 0 and argname.startswith("_"): + return + if verbose > 0: + bestrel = get_best_rel(fixture_def.func) + funcargspec = "{0} -- {1}".format(argname, bestrel) + else: + funcargspec = argname + tw.line(funcargspec, green=True) + + INDENT = ' {0}' + fixture_doc = fixture_def.func.__doc__ + + if fixture_doc: + for line in fixture_doc.strip().split('\n'): + tw.line(INDENT.format(line.strip())) + else: + tw.line(INDENT.format('no docstring available'), red=True) + + def write_item(item): + name2fixturedefs = item._fixtureinfo.name2fixturedefs + + if not name2fixturedefs: + # The given test item does not use any fixtures + return + bestrel = get_best_rel(item.function) + + tw.line() + tw.sep('-', 'fixtures used by {0}'.format(item.name)) + tw.sep('-', '({0})'.format(bestrel)) + for argname, fixture_defs in sorted(name2fixturedefs.items()): + assert fixture_defs is not None + if not fixture_defs: + continue + # The last fixture def item in the list is expected + # to be the one used by the test item + write_fixture(fixture_defs[-1]) + + for item in session.items: + write_item(item) + + def showfixtures(config): from _pytest.main import wrap_session return 
wrap_session(config, _showfixtures_main) + def _showfixtures_main(config, session): import _pytest.config session.perform_collect() @@ -1164,12 +1050,17 @@ def _showfixtures_main(config, session): fm = session._fixturemanager available = [] + seen = set() + for argname, fixturedefs in fm._arg2fixturedefs.items(): assert fixturedefs is not None if not fixturedefs: continue for fixturedef in fixturedefs: loc = getlocation(fixturedef.func, curdir) + if (fixturedef.argname, loc) in seen: + continue + seen.add((fixturedef.argname, loc)) available.append((len(fixturedef.baseid), fixturedef.func.__module__, curdir.bestrelpath(loc), @@ -1199,18 +1090,12 @@ def _showfixtures_main(config, session): tw.line(" %s: no docstring available" %(loc,), red=True) -def getlocation(function, curdir): - import inspect - fn = py.path.local(inspect.getfile(function)) - lineno = py.builtin._getcode(function).co_firstlineno - if fn.relto(curdir): - fn = fn.relto(curdir) - return "%s:%d" %(fn, lineno+1) # builtin pytest.raises helper def raises(expected_exception, *args, **kwargs): - """ assert that a code block/function call raises ``expected_exception`` + """ + Assert that a code block/function call raises ``expected_exception`` and raise a failure exception otherwise. This helper produces a ``ExceptionInfo()`` object (see below). @@ -1221,6 +1106,18 @@ def raises(expected_exception, *args, **kwargs): >>> with raises(ZeroDivisionError): ... 1/0 + .. versionchanged:: 2.10 + + In the context manager form you may use the keyword argument + ``message`` to specify a custom failure message:: + + >>> with raises(ZeroDivisionError, message="Expecting ZeroDivisionError"): + ... pass + Traceback (most recent call last): + ... + Failed: Expecting ZeroDivisionError + + .. 
note:: When using ``pytest.raises`` as a context manager, it's worthwhile to @@ -1229,19 +1126,30 @@ def raises(expected_exception, *args, **kwargs): Lines of code after that, within the scope of the context manager will not be executed. For example:: - >>> with raises(OSError) as exc_info: - assert 1 == 1 # this will execute as expected - raise OSError(errno.EEXISTS, 'directory exists') - assert exc_info.value.errno == errno.EEXISTS # this will not execute + >>> value = 15 + >>> with raises(ValueError) as exc_info: + ... if value > 10: + ... raise ValueError("value must be <= 10") + ... assert exc_info.type == ValueError # this will not execute Instead, the following approach must be taken (note the difference in scope):: - >>> with raises(OSError) as exc_info: - assert 1 == 1 # this will execute as expected - raise OSError(errno.EEXISTS, 'directory exists') + >>> with raises(ValueError) as exc_info: + ... if value > 10: + ... raise ValueError("value must be <= 10") + ... + >>> assert exc_info.type == ValueError + + Or you can use the keyword argument ``match`` to assert that the + exception matches a text or regex:: + + >>> with raises(ValueError, match='must be 0 or None'): + ... raise ValueError("value must be 0 or None") + + >>> with raises(ValueError, match=r'must be \d+$'): + ... 
raise ValueError("value must be 42") - assert exc_info.value.errno == errno.EEXISTS # this will now execute Or you can specify a callable by passing a to-be-called lambda:: @@ -1281,12 +1189,6 @@ def raises(expected_exception, *args, **kwargs): """ __tracebackhide__ = True - if expected_exception is AssertionError: - # we want to catch a AssertionError - # replace our subclass with the builtin one - # see https://github.com/pytest-dev/pytest/issues/176 - from _pytest.assertion.util import BuiltinAssertionError \ - as expected_exception msg = ("exceptions must be old-style classes or" " derived from BaseException, not %s") if isinstance(expected_exception, tuple): @@ -1296,8 +1198,16 @@ def raises(expected_exception, *args, **kwargs): elif not isclass(expected_exception): raise TypeError(msg % type(expected_exception)) + message = "DID NOT RAISE {0}".format(expected_exception) + match_expr = None + if not args: - return RaisesContext(expected_exception) + if "message" in kwargs: + message = kwargs.pop("message") + if "match" in kwargs: + match_expr = kwargs.pop("match") + message += " matching '{0}'".format(match_expr) + return RaisesContext(expected_exception, message, match_expr) elif isinstance(args[0], str): code, = args assert isinstance(code, str) @@ -1318,11 +1228,17 @@ def raises(expected_exception, *args, **kwargs): func(*args[1:], **kwargs) except expected_exception: return _pytest._code.ExceptionInfo() - pytest.fail("DID NOT RAISE {0}".format(expected_exception)) + fail(message) + + +raises.Exception = fail.Exception + class RaisesContext(object): - def __init__(self, expected_exception): + def __init__(self, expected_exception, message, match_expr): self.expected_exception = expected_exception + self.message = message + self.match_expr = match_expr self.excinfo = None def __enter__(self): @@ -1332,7 +1248,7 @@ def __enter__(self): def __exit__(self, *tp): __tracebackhide__ = True if tp[0] is None: - pytest.fail("DID NOT RAISE") + fail(self.message) if 
sys.version_info < (2, 7): # py26: on __exit__() exc_value often does not contain the # exception value. @@ -1341,20 +1257,276 @@ def __exit__(self, *tp): exc_type, value, traceback = tp tp = exc_type, exc_type(value), traceback self.excinfo.__init__(tp) - return issubclass(self.excinfo.type, self.expected_exception) + suppress_exception = issubclass(self.excinfo.type, self.expected_exception) + if sys.version_info[0] == 2 and suppress_exception: + sys.exc_clear() + if self.match_expr: + self.excinfo.match(self.match_expr) + return suppress_exception + + +# builtin pytest.approx helper + +class approx(object): + """ + Assert that two numbers (or two sets of numbers) are equal to each other + within some tolerance. + + Due to the `intricacies of floating-point arithmetic`__, numbers that we + would intuitively expect to be equal are not always so:: + + >>> 0.1 + 0.2 == 0.3 + False + + __ https://docs.python.org/3/tutorial/floatingpoint.html + + This problem is commonly encountered when writing tests, e.g. when making + sure that floating-point values are what you expect them to be. One way to + deal with this problem is to assert that two floating-point numbers are + equal to within some appropriate tolerance:: + + >>> abs((0.1 + 0.2) - 0.3) < 1e-6 + True + + However, comparisons like this are tedious to write and difficult to + understand. Furthermore, absolute comparisons like the one above are + usually discouraged because there's no tolerance that works well for all + situations. ``1e-6`` is good for numbers around ``1``, but too small for + very big numbers and too big for very small ones. It's better to express + the tolerance as a fraction of the expected value, but relative comparisons + like that are even more difficult to write correctly and concisely. 
+ + The ``approx`` class performs floating-point comparisons using a syntax + that's as intuitive as possible:: + + >>> from pytest import approx + >>> 0.1 + 0.2 == approx(0.3) + True + + The same syntax also works on sequences of numbers:: + + >>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6)) + True + + By default, ``approx`` considers numbers within a relative tolerance of + ``1e-6`` (i.e. one part in a million) of its expected value to be equal. + This treatment would lead to surprising results if the expected value was + ``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``. + To handle this case less surprisingly, ``approx`` also considers numbers + within an absolute tolerance of ``1e-12`` of its expected value to be + equal. Infinite numbers are another special case. They are only + considered equal to themselves, regardless of the relative tolerance. Both + the relative and absolute tolerances can be changed by passing arguments to + the ``approx`` constructor:: + + >>> 1.0001 == approx(1) + False + >>> 1.0001 == approx(1, rel=1e-3) + True + >>> 1.0001 == approx(1, abs=1e-3) + True + + If you specify ``abs`` but not ``rel``, the comparison will not consider + the relative tolerance at all. In other words, two numbers that are within + the default relative tolerance of ``1e-6`` will still be considered unequal + if they exceed the specified absolute tolerance. If you specify both + ``abs`` and ``rel``, the numbers will be considered equal if either + tolerance is met:: + + >>> 1 + 1e-8 == approx(1) + True + >>> 1 + 1e-8 == approx(1, abs=1e-12) + False + >>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12) + True + + If you're thinking about using ``approx``, then you might want to know how + it compares to other good ways of comparing floating-point numbers. 
All of + these algorithms are based on relative and absolute tolerances and should + agree for the most part, but they do have meaningful differences: + + - ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``: True if the relative + tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute + tolerance is met. Because the relative tolerance is calculated w.r.t. + both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor + ``b`` is a "reference value"). You have to specify an absolute tolerance + if you want to compare to ``0.0`` because there is no tolerance by + default. Only available in python>=3.5. `More information...`__ + + __ https://docs.python.org/3/library/math.html#math.isclose + + - ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference + between ``a`` and ``b`` is less that the sum of the relative tolerance + w.r.t. ``b`` and the absolute tolerance. Because the relative tolerance + is only calculated w.r.t. ``b``, this test is asymmetric and you can + think of ``b`` as the reference value. Support for comparing sequences + is provided by ``numpy.allclose``. `More information...`__ + + __ http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.isclose.html + + - ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b`` + are within an absolute tolerance of ``1e-7``. No relative tolerance is + considered and the absolute tolerance cannot be changed, so this function + is not appropriate for very large or very small numbers. Also, it's only + available in subclasses of ``unittest.TestCase`` and it's ugly because it + doesn't follow PEP8. `More information...`__ + + __ https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual + + - ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative + tolerance is met w.r.t. ``b`` or if the absolute tolerance is met. + Because the relative tolerance is only calculated w.r.t. 
``b``, this test + is asymmetric and you can think of ``b`` as the reference value. In the + special case that you explicitly specify an absolute tolerance but not a + relative tolerance, only the absolute tolerance is considered. + """ + + def __init__(self, expected, rel=None, abs=None): + self.expected = expected + self.abs = abs + self.rel = rel + + def __repr__(self): + return ', '.join(repr(x) for x in self.expected) + + def __eq__(self, actual): + from collections import Iterable + if not isinstance(actual, Iterable): + actual = [actual] + if len(actual) != len(self.expected): + return False + return all(a == x for a, x in zip(actual, self.expected)) + + __hash__ = None + + def __ne__(self, actual): + return not (actual == self) + + @property + def expected(self): + # Regardless of whether the user-specified expected value is a number + # or a sequence of numbers, return a list of ApproxNotIterable objects + # that can be compared against. + from collections import Iterable + approx_non_iter = lambda x: ApproxNonIterable(x, self.rel, self.abs) + if isinstance(self._expected, Iterable): + return [approx_non_iter(x) for x in self._expected] + else: + return [approx_non_iter(self._expected)] + + @expected.setter + def expected(self, expected): + self._expected = expected + + +class ApproxNonIterable(object): + """ + Perform approximate comparisons for single numbers only. + + In other words, the ``expected`` attribute for objects of this class must + be some sort of number. This is in contrast to the ``approx`` class, where + the ``expected`` attribute can either be a number of a sequence of numbers. + This class is responsible for making comparisons, while ``approx`` is + responsible for abstracting the difference between numbers and sequences of + numbers. Although this class can stand on its own, it's only meant to be + used within ``approx``. 
+ """ + + def __init__(self, expected, rel=None, abs=None): + self.expected = expected + self.abs = abs + self.rel = rel + + def __repr__(self): + if isinstance(self.expected, complex): + return str(self.expected) + + # Infinities aren't compared using tolerances, so don't show a + # tolerance. + if math.isinf(self.expected): + return str(self.expected) + + # If a sensible tolerance can't be calculated, self.tolerance will + # raise a ValueError. In this case, display '???'. + try: + vetted_tolerance = '{:.1e}'.format(self.tolerance) + except ValueError: + vetted_tolerance = '???' + + if sys.version_info[0] == 2: + return '{0} +- {1}'.format(self.expected, vetted_tolerance) + else: + return u'{0} \u00b1 {1}'.format(self.expected, vetted_tolerance) + + def __eq__(self, actual): + # Short-circuit exact equality. + if actual == self.expected: + return True + + # Infinity shouldn't be approximately equal to anything but itself, but + # if there's a relative tolerance, it will be infinite and infinity + # will seem approximately equal to everything. The equal-to-itself + # case would have been short circuited above, so here we can just + # return false if the expected value is infinite. The abs() call is + # for compatibility with complex numbers. + if math.isinf(abs(self.expected)): + return False + + # Return true if the two numbers are within the tolerance. + return abs(self.expected - actual) <= self.tolerance + + __hash__ = None + + def __ne__(self, actual): + return not (actual == self) + + @property + def tolerance(self): + set_default = lambda x, default: x if x is not None else default + + # Figure out what the absolute tolerance should be. ``self.abs`` is + # either None or a value specified by the user. 
+ absolute_tolerance = set_default(self.abs, 1e-12) + + if absolute_tolerance < 0: + raise ValueError("absolute tolerance can't be negative: {}".format(absolute_tolerance)) + if math.isnan(absolute_tolerance): + raise ValueError("absolute tolerance can't be NaN.") + + # If the user specified an absolute tolerance but not a relative one, + # just return the absolute tolerance. + if self.rel is None: + if self.abs is not None: + return absolute_tolerance + + # Figure out what the relative tolerance should be. ``self.rel`` is + # either None or a value specified by the user. This is done after + # we've made sure the user didn't ask for an absolute tolerance only, + # because we don't want to raise errors about the relative tolerance if + # we aren't even going to use it. + relative_tolerance = set_default(self.rel, 1e-6) * abs(self.expected) + + if relative_tolerance < 0: + raise ValueError("relative tolerance can't be negative: {}".format(absolute_tolerance)) + if math.isnan(relative_tolerance): + raise ValueError("relative tolerance can't be NaN.") + + # Return the larger of the relative and absolute tolerances. + return max(relative_tolerance, absolute_tolerance) + # # the basic pytest Function item # -class Function(FunctionMixin, pytest.Item, FuncargnamesCompatAttr): +class Function(FunctionMixin, main.Item, fixtures.FuncargnamesCompatAttr): """ a Function Item is responsible for setting up and executing a Python test function. 
""" _genid = None def __init__(self, name, parent, args=None, config=None, callspec=None, callobj=NOTSET, keywords=None, session=None, - fixtureinfo=None): + fixtureinfo=None, originalname=None): super(Function, self).__init__(name, parent, config=config, session=session) self._args = args @@ -1376,6 +1548,12 @@ def __init__(self, name, parent, args=None, config=None, self.fixturenames = fixtureinfo.names_closure self._initrequest() + #: original function name, without any decorations (for example + #: parametrization adds a ``"[...]"`` suffix to function names). + #: + #: .. versionadded:: 3.0 + self.originalname = originalname + def _initrequest(self): self.funcargs = {} if self._isyieldedfunction(): @@ -1388,7 +1566,7 @@ def _initrequest(self): self._genid = callspec.id if hasattr(callspec, "param"): self.param = callspec.param - self._request = FixtureRequest(self) + self._request = fixtures.FixtureRequest(self) @property def function(self): @@ -1416,885 +1594,4 @@ def runtest(self): def setup(self): super(Function, self).setup() - fillfixtures(self) - - -scope2props = dict(session=()) -scope2props["module"] = ("fspath", "module") -scope2props["class"] = scope2props["module"] + ("cls",) -scope2props["instance"] = scope2props["class"] + ("instance", ) -scope2props["function"] = scope2props["instance"] + ("function", "keywords") - -def scopeproperty(name=None, doc=None): - def decoratescope(func): - scopename = name or func.__name__ - def provide(self): - if func.__name__ in scope2props[self.scope]: - return func(self) - raise AttributeError("%s not available in %s-scoped context" % ( - scopename, self.scope)) - return property(provide, None, None, func.__doc__) - return decoratescope - - -class FixtureRequest(FuncargnamesCompatAttr): - """ A request for a fixture from a test or fixture function. - - A request object gives access to the requesting test context - and has an optional ``param`` attribute in case - the fixture is parametrized indirectly. 
- """ - - def __init__(self, pyfuncitem): - self._pyfuncitem = pyfuncitem - #: fixture for which this request is being performed - self.fixturename = None - #: Scope string, one of "function", "class", "module", "session" - self.scope = "function" - self._funcargs = {} - self._fixturedefs = {} - fixtureinfo = pyfuncitem._fixtureinfo - self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy() - self._arg2index = {} - self.fixturenames = fixtureinfo.names_closure - self._fixturemanager = pyfuncitem.session._fixturemanager - - @property - def node(self): - """ underlying collection node (depends on current request scope)""" - return self._getscopeitem(self.scope) - - - def _getnextfixturedef(self, argname): - fixturedefs = self._arg2fixturedefs.get(argname, None) - if fixturedefs is None: - # we arrive here because of a a dynamic call to - # getfuncargvalue(argname) usage which was naturally - # not known at parsing/collection time - fixturedefs = self._fixturemanager.getfixturedefs( - argname, self._pyfuncitem.parent.nodeid) - self._arg2fixturedefs[argname] = fixturedefs - # fixturedefs list is immutable so we maintain a decreasing index - index = self._arg2index.get(argname, 0) - 1 - if fixturedefs is None or (-index > len(fixturedefs)): - raise FixtureLookupError(argname, self) - self._arg2index[argname] = index - return fixturedefs[index] - - @property - def config(self): - """ the pytest config object associated with this request. """ - return self._pyfuncitem.config - - - @scopeproperty() - def function(self): - """ test function object if the request has a per-function scope. """ - return self._pyfuncitem.obj - - @scopeproperty("class") - def cls(self): - """ class (can be None) where the test function was collected. """ - clscol = self._pyfuncitem.getparent(pytest.Class) - if clscol: - return clscol.obj - - @property - def instance(self): - """ instance (can be None) on which test function was collected. 
""" - # unittest support hack, see _pytest.unittest.TestCaseFunction - try: - return self._pyfuncitem._testcase - except AttributeError: - function = getattr(self, "function", None) - if function is not None: - return py.builtin._getimself(function) - - @scopeproperty() - def module(self): - """ python module object where the test function was collected. """ - return self._pyfuncitem.getparent(pytest.Module).obj - - @scopeproperty() - def fspath(self): - """ the file system path of the test module which collected this test. """ - return self._pyfuncitem.fspath - - @property - def keywords(self): - """ keywords/markers dictionary for the underlying node. """ - return self.node.keywords - - @property - def session(self): - """ pytest session object. """ - return self._pyfuncitem.session - - def addfinalizer(self, finalizer): - """ add finalizer/teardown function to be called after the - last test within the requesting test context finished - execution. """ - # XXX usually this method is shadowed by fixturedef specific ones - self._addfinalizer(finalizer, scope=self.scope) - - def _addfinalizer(self, finalizer, scope): - colitem = self._getscopeitem(scope) - self._pyfuncitem.session._setupstate.addfinalizer( - finalizer=finalizer, colitem=colitem) - - def applymarker(self, marker): - """ Apply a marker to a single test function invocation. - This method is useful if you don't want to have a keyword/marker - on all function invocations. - - :arg marker: a :py:class:`_pytest.mark.MarkDecorator` object - created by a call to ``pytest.mark.NAME(...)``. - """ - try: - self.node.keywords[marker.markname] = marker - except AttributeError: - raise ValueError(marker) - - def raiseerror(self, msg): - """ raise a FixtureLookupError with the given message. 
""" - raise self._fixturemanager.FixtureLookupError(None, self, msg) - - def _fillfixtures(self): - item = self._pyfuncitem - fixturenames = getattr(item, "fixturenames", self.fixturenames) - for argname in fixturenames: - if argname not in item.funcargs: - item.funcargs[argname] = self.getfuncargvalue(argname) - - def cached_setup(self, setup, teardown=None, scope="module", extrakey=None): - """ (deprecated) Return a testing resource managed by ``setup`` & - ``teardown`` calls. ``scope`` and ``extrakey`` determine when the - ``teardown`` function will be called so that subsequent calls to - ``setup`` would recreate the resource. With pytest-2.3 you often - do not need ``cached_setup()`` as you can directly declare a scope - on a fixture function and register a finalizer through - ``request.addfinalizer()``. - - :arg teardown: function receiving a previously setup resource. - :arg setup: a no-argument function creating a resource. - :arg scope: a string value out of ``function``, ``class``, ``module`` - or ``session`` indicating the caching lifecycle of the resource. - :arg extrakey: added to internal caching key of (funcargname, scope). - """ - if not hasattr(self.config, '_setupcache'): - self.config._setupcache = {} # XXX weakref? - cachekey = (self.fixturename, self._getscopeitem(scope), extrakey) - cache = self.config._setupcache - try: - val = cache[cachekey] - except KeyError: - self._check_scope(self.fixturename, self.scope, scope) - val = setup() - cache[cachekey] = val - if teardown is not None: - def finalizer(): - del cache[cachekey] - teardown(val) - self._addfinalizer(finalizer, scope=scope) - return val - - def getfuncargvalue(self, argname): - """ Dynamically retrieve a named fixture function argument. - - As of pytest-2.3, it is easier and usually better to access other - fixture values by stating it as an input argument in the fixture - function. 
If you only can decide about using another fixture at test - setup time, you may use this function to retrieve it inside a fixture - function body. - """ - return self._get_active_fixturedef(argname).cached_result[0] - - def _get_active_fixturedef(self, argname): - try: - return self._fixturedefs[argname] - except KeyError: - try: - fixturedef = self._getnextfixturedef(argname) - except FixtureLookupError: - if argname == "request": - class PseudoFixtureDef: - cached_result = (self, [0], None) - scope = "function" - return PseudoFixtureDef - raise - # remove indent to prevent the python3 exception - # from leaking into the call - result = self._getfuncargvalue(fixturedef) - self._funcargs[argname] = result - self._fixturedefs[argname] = fixturedef - return fixturedef - - def _get_fixturestack(self): - current = self - l = [] - while 1: - fixturedef = getattr(current, "_fixturedef", None) - if fixturedef is None: - l.reverse() - return l - l.append(fixturedef) - current = current._parent_request - - def _getfuncargvalue(self, fixturedef): - # prepare a subrequest object before calling fixture function - # (latter managed by fixturedef) - argname = fixturedef.argname - funcitem = self._pyfuncitem - scope = fixturedef.scope - try: - param = funcitem.callspec.getparam(argname) - except (AttributeError, ValueError): - param = NOTSET - param_index = 0 - else: - # indices might not be set if old-style metafunc.addcall() was used - param_index = funcitem.callspec.indices.get(argname, 0) - # if a parametrize invocation set a scope it will override - # the static scope defined with the fixture function - paramscopenum = funcitem.callspec._arg2scopenum.get(argname) - if paramscopenum is not None: - scope = scopes[paramscopenum] - - subrequest = SubRequest(self, scope, param, param_index, fixturedef) - - # check if a higher-level scoped fixture accesses a lower level one - subrequest._check_scope(argname, self.scope, scope) - - # clear sys.exc_info before invoking the fixture 
(python bug?) - # if its not explicitly cleared it will leak into the call - exc_clear() - try: - # call the fixture function - val = fixturedef.execute(request=subrequest) - finally: - # if fixture function failed it might have registered finalizers - self.session._setupstate.addfinalizer(fixturedef.finish, - subrequest.node) - return val - - def _check_scope(self, argname, invoking_scope, requested_scope): - if argname == "request": - return - if scopemismatch(invoking_scope, requested_scope): - # try to report something helpful - lines = self._factorytraceback() - pytest.fail("ScopeMismatch: You tried to access the %r scoped " - "fixture %r with a %r scoped request object, " - "involved factories\n%s" %( - (requested_scope, argname, invoking_scope, "\n".join(lines))), - pytrace=False) - - def _factorytraceback(self): - lines = [] - for fixturedef in self._get_fixturestack(): - factory = fixturedef.func - fs, lineno = getfslineno(factory) - p = self._pyfuncitem.session.fspath.bestrelpath(fs) - args = _format_args(factory) - lines.append("%s:%d: def %s%s" %( - p, lineno, factory.__name__, args)) - return lines - - def _getscopeitem(self, scope): - if scope == "function": - # this might also be a non-function Item despite its attribute name - return self._pyfuncitem - node = get_scope_node(self._pyfuncitem, scope) - if node is None and scope == "class": - # fallback to function item itself - node = self._pyfuncitem - assert node - return node - - def __repr__(self): - return "" %(self.node) - - -class SubRequest(FixtureRequest): - """ a sub request for handling getting a fixture from a - test function/fixture. 
""" - def __init__(self, request, scope, param, param_index, fixturedef): - self._parent_request = request - self.fixturename = fixturedef.argname - if param is not NOTSET: - self.param = param - self.param_index = param_index - self.scope = scope - self._fixturedef = fixturedef - self.addfinalizer = fixturedef.addfinalizer - self._pyfuncitem = request._pyfuncitem - self._funcargs = request._funcargs - self._fixturedefs = request._fixturedefs - self._arg2fixturedefs = request._arg2fixturedefs - self._arg2index = request._arg2index - self.fixturenames = request.fixturenames - self._fixturemanager = request._fixturemanager - - def __repr__(self): - return "" % (self.fixturename, self._pyfuncitem) - - -class ScopeMismatchError(Exception): - """ A fixture function tries to use a different fixture function which - which has a lower scope (e.g. a Session one calls a function one) - """ - -scopes = "session module class function".split() -scopenum_function = scopes.index("function") -def scopemismatch(currentscope, newscope): - return scopes.index(newscope) > scopes.index(currentscope) - - -class FixtureLookupError(LookupError): - """ could not return a requested Fixture (missing or invalid). 
""" - def __init__(self, argname, request, msg=None): - self.argname = argname - self.request = request - self.fixturestack = request._get_fixturestack() - self.msg = msg - - def formatrepr(self): - tblines = [] - addline = tblines.append - stack = [self.request._pyfuncitem.obj] - stack.extend(map(lambda x: x.func, self.fixturestack)) - msg = self.msg - if msg is not None: - # the last fixture raise an error, let's present - # it at the requesting side - stack = stack[:-1] - for function in stack: - fspath, lineno = getfslineno(function) - try: - lines, _ = inspect.getsourcelines(get_real_func(function)) - except (IOError, IndexError): - error_msg = "file %s, line %s: source code not available" - addline(error_msg % (fspath, lineno+1)) - else: - addline("file %s, line %s" % (fspath, lineno+1)) - for i, line in enumerate(lines): - line = line.rstrip() - addline(" " + line) - if line.lstrip().startswith('def'): - break - - if msg is None: - fm = self.request._fixturemanager - available = [] - for name, fixturedef in fm._arg2fixturedefs.items(): - parentid = self.request._pyfuncitem.parent.nodeid - faclist = list(fm._matchfactories(fixturedef, parentid)) - if faclist: - available.append(name) - msg = "fixture %r not found" % (self.argname,) - msg += "\n available fixtures: %s" %(", ".join(available),) - msg += "\n use 'py.test --fixtures [testpath]' for help on them." 
- - return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname) - -class FixtureLookupErrorRepr(TerminalRepr): - def __init__(self, filename, firstlineno, tblines, errorstring, argname): - self.tblines = tblines - self.errorstring = errorstring - self.filename = filename - self.firstlineno = firstlineno - self.argname = argname - - def toterminal(self, tw): - #tw.line("FixtureLookupError: %s" %(self.argname), red=True) - for tbline in self.tblines: - tw.line(tbline.rstrip()) - for line in self.errorstring.split("\n"): - tw.line(" " + line.strip(), red=True) - tw.line() - tw.line("%s:%d" % (self.filename, self.firstlineno+1)) - -class FixtureManager: - """ - pytest fixtures definitions and information is stored and managed - from this class. - - During collection fm.parsefactories() is called multiple times to parse - fixture function definitions into FixtureDef objects and internal - data structures. - - During collection of test functions, metafunc-mechanics instantiate - a FuncFixtureInfo object which is cached per node/func-name. - This FuncFixtureInfo object is later retrieved by Function nodes - which themselves offer a fixturenames attribute. - - The FuncFixtureInfo object holds information about fixtures and FixtureDefs - relevant for a particular function. An initial list of fixtures is - assembled like this: - - - ini-defined usefixtures - - autouse-marked fixtures along the collection chain up from the function - - usefixtures markers at module/class/function level - - test function funcargs - - Subsequently the funcfixtureinfo.fixturenames attribute is computed - as the closure of the fixtures needed to setup the initial fixtures, - i. e. fixtures needed by fixture functions themselves are appended - to the fixturenames list. - - Upon the test-setup phases all fixturenames are instantiated, retrieved - by a lookup of their FuncFixtureInfo. 
- """ - - _argprefix = "pytest_funcarg__" - FixtureLookupError = FixtureLookupError - FixtureLookupErrorRepr = FixtureLookupErrorRepr - - def __init__(self, session): - self.session = session - self.config = session.config - self._arg2fixturedefs = {} - self._holderobjseen = set() - self._arg2finish = {} - self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))] - session.config.pluginmanager.register(self, "funcmanage") - - - def getfixtureinfo(self, node, func, cls, funcargs=True): - if funcargs and not hasattr(node, "nofuncargs"): - if cls is not None: - startindex = 1 - else: - startindex = None - argnames = getfuncargnames(func, startindex) - else: - argnames = () - usefixtures = getattr(func, "usefixtures", None) - initialnames = argnames - if usefixtures is not None: - initialnames = usefixtures.args + initialnames - fm = node.session._fixturemanager - names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames, - node) - return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs) - - def pytest_plugin_registered(self, plugin): - nodeid = None - try: - p = py.path.local(plugin.__file__) - except AttributeError: - pass - else: - # construct the base nodeid which is later used to check - # what fixtures are visible for particular tests (as denoted - # by their test id) - if p.basename.startswith("conftest.py"): - nodeid = p.dirpath().relto(self.config.rootdir) - if p.sep != "/": - nodeid = nodeid.replace(p.sep, "/") - self.parsefactories(plugin, nodeid) - - def _getautousenames(self, nodeid): - """ return a tuple of fixture names to be used. 
""" - autousenames = [] - for baseid, basenames in self._nodeid_and_autousenames: - if nodeid.startswith(baseid): - if baseid: - i = len(baseid) - nextchar = nodeid[i:i+1] - if nextchar and nextchar not in ":/": - continue - autousenames.extend(basenames) - # make sure autousenames are sorted by scope, scopenum 0 is session - autousenames.sort( - key=lambda x: self._arg2fixturedefs[x][-1].scopenum) - return autousenames - - def getfixtureclosure(self, fixturenames, parentnode): - # collect the closure of all fixtures , starting with the given - # fixturenames as the initial set. As we have to visit all - # factory definitions anyway, we also return a arg2fixturedefs - # mapping so that the caller can reuse it and does not have - # to re-discover fixturedefs again for each fixturename - # (discovering matching fixtures for a given name/node is expensive) - - parentid = parentnode.nodeid - fixturenames_closure = self._getautousenames(parentid) - def merge(otherlist): - for arg in otherlist: - if arg not in fixturenames_closure: - fixturenames_closure.append(arg) - merge(fixturenames) - arg2fixturedefs = {} - lastlen = -1 - while lastlen != len(fixturenames_closure): - lastlen = len(fixturenames_closure) - for argname in fixturenames_closure: - if argname in arg2fixturedefs: - continue - fixturedefs = self.getfixturedefs(argname, parentid) - if fixturedefs: - arg2fixturedefs[argname] = fixturedefs - merge(fixturedefs[-1].argnames) - return fixturenames_closure, arg2fixturedefs - - def pytest_generate_tests(self, metafunc): - for argname in metafunc.fixturenames: - faclist = metafunc._arg2fixturedefs.get(argname) - if faclist: - fixturedef = faclist[-1] - if fixturedef.params is not None: - func_params = getattr(getattr(metafunc.function, 'parametrize', None), 'args', [[None]]) - # skip directly parametrized arguments - argnames = func_params[0] - if not isinstance(argnames, (tuple, list)): - argnames = [x.strip() for x in argnames.split(",") if x.strip()] - if argname 
not in func_params and argname not in argnames: - metafunc.parametrize(argname, fixturedef.params, - indirect=True, scope=fixturedef.scope, - ids=fixturedef.ids) - else: - continue # will raise FixtureLookupError at setup time - - def pytest_collection_modifyitems(self, items): - # separate parametrized setups - items[:] = reorder_items(items) - - def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False): - if nodeid is not NOTSET: - holderobj = node_or_obj - else: - holderobj = node_or_obj.obj - nodeid = node_or_obj.nodeid - if holderobj in self._holderobjseen: - return - self._holderobjseen.add(holderobj) - autousenames = [] - for name in dir(holderobj): - obj = getattr(holderobj, name, None) - # fixture functions have a pytest_funcarg__ prefix (pre-2.3 style) - # or are "@pytest.fixture" marked - marker = getfixturemarker(obj) - if marker is None: - if not name.startswith(self._argprefix): - continue - if not callable(obj): - continue - marker = defaultfuncargprefixmarker - name = name[len(self._argprefix):] - elif not isinstance(marker, FixtureFunctionMarker): - # magic globals with __getattr__ might have got us a wrong - # fixture attribute - continue - else: - assert not name.startswith(self._argprefix) - fixturedef = FixtureDef(self, nodeid, name, obj, - marker.scope, marker.params, - yieldctx=marker.yieldctx, - unittest=unittest, ids=marker.ids) - faclist = self._arg2fixturedefs.setdefault(name, []) - if fixturedef.has_location: - faclist.append(fixturedef) - else: - # fixturedefs with no location are at the front - # so this inserts the current fixturedef after the - # existing fixturedefs from external plugins but - # before the fixturedefs provided in conftests. 
- i = len([f for f in faclist if not f.has_location]) - faclist.insert(i, fixturedef) - if marker.autouse: - autousenames.append(name) - if autousenames: - self._nodeid_and_autousenames.append((nodeid or '', autousenames)) - - def getfixturedefs(self, argname, nodeid): - try: - fixturedefs = self._arg2fixturedefs[argname] - except KeyError: - return None - else: - return tuple(self._matchfactories(fixturedefs, nodeid)) - - def _matchfactories(self, fixturedefs, nodeid): - for fixturedef in fixturedefs: - if nodeid.startswith(fixturedef.baseid): - yield fixturedef - - -def fail_fixturefunc(fixturefunc, msg): - fs, lineno = getfslineno(fixturefunc) - location = "%s:%s" % (fs, lineno+1) - source = _pytest._code.Source(fixturefunc) - pytest.fail(msg + ":\n\n" + str(source.indent()) + "\n" + location, - pytrace=False) - -def call_fixture_func(fixturefunc, request, kwargs, yieldctx): - if yieldctx: - if not is_generator(fixturefunc): - fail_fixturefunc(fixturefunc, - msg="yield_fixture requires yield statement in function") - iter = fixturefunc(**kwargs) - next = getattr(iter, "__next__", None) - if next is None: - next = getattr(iter, "next") - res = next() - def teardown(): - try: - next() - except StopIteration: - pass - else: - fail_fixturefunc(fixturefunc, - "yield_fixture function has more than one 'yield'") - request.addfinalizer(teardown) - else: - if is_generator(fixturefunc): - fail_fixturefunc(fixturefunc, - msg="pytest.fixture functions cannot use ``yield``. " - "Instead write and return an inner function/generator " - "and let the consumer call and iterate over it.") - res = fixturefunc(**kwargs) - return res - -class FixtureDef: - """ A container for a factory definition. 
""" - def __init__(self, fixturemanager, baseid, argname, func, scope, params, - yieldctx, unittest=False, ids=None): - self._fixturemanager = fixturemanager - self.baseid = baseid or '' - self.has_location = baseid is not None - self.func = func - self.argname = argname - self.scope = scope - self.scopenum = scopes.index(scope or "function") - self.params = params - startindex = unittest and 1 or None - self.argnames = getfuncargnames(func, startindex=startindex) - self.yieldctx = yieldctx - self.unittest = unittest - self.ids = ids - self._finalizer = [] - - def addfinalizer(self, finalizer): - self._finalizer.append(finalizer) - - def finish(self): - try: - while self._finalizer: - func = self._finalizer.pop() - func() - finally: - # even if finalization fails, we invalidate - # the cached fixture value - if hasattr(self, "cached_result"): - del self.cached_result - - def execute(self, request): - # get required arguments and register our own finish() - # with their finalization - kwargs = {} - for argname in self.argnames: - fixturedef = request._get_active_fixturedef(argname) - result, arg_cache_key, exc = fixturedef.cached_result - request._check_scope(argname, request.scope, fixturedef.scope) - kwargs[argname] = result - if argname != "request": - fixturedef.addfinalizer(self.finish) - - my_cache_key = request.param_index - cached_result = getattr(self, "cached_result", None) - if cached_result is not None: - result, cache_key, err = cached_result - if my_cache_key == cache_key: - if err is not None: - py.builtin._reraise(*err) - else: - return result - # we have a previous but differently parametrized fixture instance - # so we need to tear it down before creating a new one - self.finish() - assert not hasattr(self, "cached_result") - - fixturefunc = self.func - - if self.unittest: - if request.instance is not None: - # bind the unbound method to the TestCase instance - fixturefunc = self.func.__get__(request.instance) - else: - # the fixture function needs 
to be bound to the actual - # request.instance so that code working with "self" behaves - # as expected. - if request.instance is not None: - fixturefunc = getimfunc(self.func) - if fixturefunc != self.func: - fixturefunc = fixturefunc.__get__(request.instance) - - try: - result = call_fixture_func(fixturefunc, request, kwargs, - self.yieldctx) - except Exception: - self.cached_result = (None, my_cache_key, sys.exc_info()) - raise - self.cached_result = (result, my_cache_key, None) - return result - - def __repr__(self): - return ("" % - (self.argname, self.scope, self.baseid)) - -def num_mock_patch_args(function): - """ return number of arguments used up by mock arguments (if any) """ - patchings = getattr(function, "patchings", None) - if not patchings: - return 0 - mock = sys.modules.get("mock", sys.modules.get("unittest.mock", None)) - if mock is not None: - return len([p for p in patchings - if not p.attribute_name and p.new is mock.DEFAULT]) - return len(patchings) - - -def getfuncargnames(function, startindex=None): - # XXX merge with main.py's varnames - #assert not isclass(function) - realfunction = function - while hasattr(realfunction, "__wrapped__"): - realfunction = realfunction.__wrapped__ - if startindex is None: - startindex = inspect.ismethod(function) and 1 or 0 - if realfunction != function: - startindex += num_mock_patch_args(function) - function = realfunction - if isinstance(function, functools.partial): - argnames = inspect.getargs(_pytest._code.getrawcode(function.func))[0] - partial = function - argnames = argnames[len(partial.args):] - if partial.keywords: - for kw in partial.keywords: - argnames.remove(kw) - else: - argnames = inspect.getargs(_pytest._code.getrawcode(function))[0] - defaults = getattr(function, 'func_defaults', - getattr(function, '__defaults__', None)) or () - numdefaults = len(defaults) - if numdefaults: - return tuple(argnames[startindex:-numdefaults]) - return tuple(argnames[startindex:]) - -# algorithm for sorting on 
a per-parametrized resource setup basis -# it is called for scopenum==0 (session) first and performs sorting -# down to the lower scopes such as to minimize number of "high scope" -# setups and teardowns - -def reorder_items(items): - argkeys_cache = {} - for scopenum in range(0, scopenum_function): - argkeys_cache[scopenum] = d = {} - for item in items: - keys = set(get_parametrized_fixture_keys(item, scopenum)) - if keys: - d[item] = keys - return reorder_items_atscope(items, set(), argkeys_cache, 0) - -def reorder_items_atscope(items, ignore, argkeys_cache, scopenum): - if scopenum >= scopenum_function or len(items) < 3: - return items - items_done = [] - while 1: - items_before, items_same, items_other, newignore = \ - slice_items(items, ignore, argkeys_cache[scopenum]) - items_before = reorder_items_atscope( - items_before, ignore, argkeys_cache,scopenum+1) - if items_same is None: - # nothing to reorder in this scope - assert items_other is None - return items_done + items_before - items_done.extend(items_before) - items = items_same + items_other - ignore = newignore - - -def slice_items(items, ignore, scoped_argkeys_cache): - # we pick the first item which uses a fixture instance in the - # requested scope and which we haven't seen yet. We slice the input - # items list into a list of items_nomatch, items_same and - # items_other - if scoped_argkeys_cache: # do we need to do work at all? 
- it = iter(items) - # first find a slicing key - for i, item in enumerate(it): - argkeys = scoped_argkeys_cache.get(item) - if argkeys is not None: - argkeys = argkeys.difference(ignore) - if argkeys: # found a slicing key - slicing_argkey = argkeys.pop() - items_before = items[:i] - items_same = [item] - items_other = [] - # now slice the remainder of the list - for item in it: - argkeys = scoped_argkeys_cache.get(item) - if argkeys and slicing_argkey in argkeys and \ - slicing_argkey not in ignore: - items_same.append(item) - else: - items_other.append(item) - newignore = ignore.copy() - newignore.add(slicing_argkey) - return (items_before, items_same, items_other, newignore) - return items, None, None, None - -def get_parametrized_fixture_keys(item, scopenum): - """ return list of keys for all parametrized arguments which match - the specified scope. """ - assert scopenum < scopenum_function # function - try: - cs = item.callspec - except AttributeError: - pass - else: - # cs.indictes.items() is random order of argnames but - # then again different functions (items) can change order of - # arguments so it doesn't matter much probably - for argname, param_index in cs.indices.items(): - if cs._arg2scopenum[argname] != scopenum: - continue - if scopenum == 0: # session - key = (argname, param_index) - elif scopenum == 1: # module - key = (argname, param_index, item.fspath) - elif scopenum == 2: # class - key = (argname, param_index, item.fspath, item.cls) - yield key - - -def xunitsetup(obj, name): - meth = getattr(obj, name, None) - if getfixturemarker(meth) is None: - return meth - -def getfixturemarker(obj): - """ return fixturemarker or None if it doesn't exist or raised - exceptions.""" - try: - return getattr(obj, "_pytestfixturefunction", None) - except KeyboardInterrupt: - raise - except Exception: - # some objects raise errors like request (from flask import request) - # we don't expect them to be fixture functions - return None - -scopename2class = { - 
'class': Class, - 'module': Module, - 'function': pytest.Item, -} -def get_scope_node(node, scope): - cls = scopename2class.get(scope) - if cls is None: - if scope == "session": - return node.session - raise ValueError("unknown scope") - return node.getparent(cls) + fixtures.fillfixtures(self) diff --git a/third_party/python/pytest/_pytest/recwarn.py b/third_party/python/pytest/_pytest/recwarn.py index a89474c036aa5..9cc404a4901b3 100644 --- a/third_party/python/pytest/_pytest/recwarn.py +++ b/third_party/python/pytest/_pytest/recwarn.py @@ -1,4 +1,5 @@ """ recording warnings during test function execution. """ +from __future__ import absolute_import, division, print_function import inspect @@ -6,11 +7,11 @@ import py import sys import warnings -import pytest +from _pytest.fixtures import yield_fixture -@pytest.yield_fixture -def recwarn(request): +@yield_fixture +def recwarn(): """Return a WarningsRecorder instance that provides these methods: * ``pop(category=None)``: return last warning matching the category. @@ -25,54 +26,59 @@ def recwarn(request): yield wrec -def pytest_namespace(): - return {'deprecated_call': deprecated_call, - 'warns': warns} - - def deprecated_call(func=None, *args, **kwargs): - """ assert that calling ``func(*args, **kwargs)`` triggers a - ``DeprecationWarning`` or ``PendingDeprecationWarning``. + """context manager that can be used to ensure a block of code triggers a + ``DeprecationWarning`` or ``PendingDeprecationWarning``:: - This function can be used as a context manager:: + >>> import warnings + >>> def api_call_v2(): + ... warnings.warn('use v3 of this api', DeprecationWarning) + ... return 200 >>> with deprecated_call(): - ... myobject.deprecated_method() + ... assert api_call_v2() == 200 - Note: we cannot use WarningsRecorder here because it is still subject - to the mechanism that prevents warnings of the same type from being - triggered twice for the same module. See #1190. 
+ ``deprecated_call`` can also be used by passing a function and ``*args`` and ``*kwargs``, + in which case it will ensure calling ``func(*args, **kwargs)`` produces one of the warnings + types above. """ if not func: - return WarningsChecker(expected_warning=DeprecationWarning) + return _DeprecatedCallContext() + else: + __tracebackhide__ = True + with _DeprecatedCallContext(): + return func(*args, **kwargs) - categories = [] - def warn_explicit(message, category, *args, **kwargs): - categories.append(category) - old_warn_explicit(message, category, *args, **kwargs) +class _DeprecatedCallContext(object): + """Implements the logic to capture deprecation warnings as a context manager.""" - def warn(message, category=None, *args, **kwargs): + def __enter__(self): + self._captured_categories = [] + self._old_warn = warnings.warn + self._old_warn_explicit = warnings.warn_explicit + warnings.warn_explicit = self._warn_explicit + warnings.warn = self._warn + + def _warn_explicit(self, message, category, *args, **kwargs): + self._captured_categories.append(category) + + def _warn(self, message, category=None, *args, **kwargs): if isinstance(message, Warning): - categories.append(message.__class__) + self._captured_categories.append(message.__class__) else: - categories.append(category) - old_warn(message, category, *args, **kwargs) - - old_warn = warnings.warn - old_warn_explicit = warnings.warn_explicit - warnings.warn_explicit = warn_explicit - warnings.warn = warn - try: - ret = func(*args, **kwargs) - finally: - warnings.warn_explicit = old_warn_explicit - warnings.warn = old_warn - deprecation_categories = (DeprecationWarning, PendingDeprecationWarning) - if not any(issubclass(c, deprecation_categories) for c in categories): - __tracebackhide__ = True - raise AssertionError("%r did not produce DeprecationWarning" % (func,)) - return ret + self._captured_categories.append(category) + + def __exit__(self, exc_type, exc_val, exc_tb): + warnings.warn_explicit = 
self._old_warn_explicit + warnings.warn = self._old_warn + + if exc_type is None: + deprecation_categories = (DeprecationWarning, PendingDeprecationWarning) + if not any(issubclass(c, deprecation_categories) for c in self._captured_categories): + __tracebackhide__ = True + msg = "Did not produce DeprecationWarning or PendingDeprecationWarning" + raise AssertionError(msg) def warns(expected_warning, *args, **kwargs): @@ -110,24 +116,14 @@ def warns(expected_warning, *args, **kwargs): return func(*args[1:], **kwargs) -class RecordedWarning(object): - def __init__(self, message, category, filename, lineno, file, line): - self.message = message - self.category = category - self.filename = filename - self.lineno = lineno - self.file = file - self.line = line - - -class WarningsRecorder(object): +class WarningsRecorder(warnings.catch_warnings): """A context manager to record raised warnings. Adapted from `warnings.catch_warnings`. """ - def __init__(self, module=None): - self._module = sys.modules['warnings'] if module is None else module + def __init__(self): + super(WarningsRecorder, self).__init__(record=True) self._entered = False self._list = [] @@ -164,38 +160,20 @@ def __enter__(self): if self._entered: __tracebackhide__ = True raise RuntimeError("Cannot enter %r twice" % self) - self._entered = True - self._filters = self._module.filters - self._module.filters = self._filters[:] - self._showwarning = self._module.showwarning - - def showwarning(message, category, filename, lineno, - file=None, line=None): - self._list.append(RecordedWarning( - message, category, filename, lineno, file, line)) - - # still perform old showwarning functionality - self._showwarning( - message, category, filename, lineno, file=file, line=line) - - self._module.showwarning = showwarning - - # allow the same warning to be raised more than once - - self._module.simplefilter('always') + self._list = super(WarningsRecorder, self).__enter__() + warnings.simplefilter('always') return self 
def __exit__(self, *exc_info): if not self._entered: __tracebackhide__ = True raise RuntimeError("Cannot exit %r without entering first" % self) - self._module.filters = self._filters - self._module.showwarning = self._showwarning + super(WarningsRecorder, self).__exit__(*exc_info) class WarningsChecker(WarningsRecorder): - def __init__(self, expected_warning=None, module=None): - super(WarningsChecker, self).__init__(module=module) + def __init__(self, expected_warning=None): + super(WarningsChecker, self).__init__() msg = ("exceptions must be old-style classes or " "derived from Warning, not %s") @@ -216,6 +194,11 @@ def __exit__(self, *exc_info): # only check if we're not currently handling an exception if all(a is None for a in exc_info): if self.expected_warning is not None: - if not any(r.category in self.expected_warning for r in self): + if not any(issubclass(r.category, self.expected_warning) + for r in self): __tracebackhide__ = True - pytest.fail("DID NOT WARN") + from _pytest.runner import fail + fail("DID NOT WARN. No warnings of type {0} was emitted. " + "The list of emitted warnings is: {1}.".format( + self.expected_warning, + [each.message for each in self])) diff --git a/third_party/python/pytest/_pytest/resultlog.py b/third_party/python/pytest/_pytest/resultlog.py index 3670f0214c9df..3e4b00cf9bb36 100644 --- a/third_party/python/pytest/_pytest/resultlog.py +++ b/third_party/python/pytest/_pytest/resultlog.py @@ -1,6 +1,7 @@ """ log machine-parseable test session result information in a plain text file. 
""" +from __future__ import absolute_import, division, print_function import py import os @@ -9,7 +10,7 @@ def pytest_addoption(parser): group = parser.getgroup("terminal reporting", "resultlog plugin options") group.addoption('--resultlog', '--result-log', action="store", metavar="path", default=None, - help="path for machine-readable result log.") + help="DEPRECATED path for machine-readable result log.") def pytest_configure(config): resultlog = config.option.resultlog @@ -22,6 +23,9 @@ def pytest_configure(config): config._resultlog = ResultLog(config, logfile) config.pluginmanager.register(config._resultlog) + from _pytest.deprecated import RESULT_LOG + config.warn('C1', RESULT_LOG) + def pytest_unconfigure(config): resultlog = getattr(config, '_resultlog', None) if resultlog: @@ -58,9 +62,9 @@ def __init__(self, config, logfile): self.logfile = logfile # preferably line buffered def write_log_entry(self, testpath, lettercode, longrepr): - py.builtin.print_("%s %s" % (lettercode, testpath), file=self.logfile) + print("%s %s" % (lettercode, testpath), file=self.logfile) for line in longrepr.splitlines(): - py.builtin.print_(" %s" % line, file=self.logfile) + print(" %s" % line, file=self.logfile) def log_outcome(self, report, lettercode, longrepr): testpath = getattr(report, 'nodeid', None) diff --git a/third_party/python/pytest/_pytest/runner.py b/third_party/python/pytest/_pytest/runner.py index cde94c8c89e18..fd0b549a9abd1 100644 --- a/third_party/python/pytest/_pytest/runner.py +++ b/third_party/python/pytest/_pytest/runner.py @@ -1,20 +1,14 @@ """ basic collect and runtest protocol implementations """ +from __future__ import absolute_import, division, print_function + import bdb import sys from time import time import py -import pytest from _pytest._code.code import TerminalRepr, ExceptionInfo -def pytest_namespace(): - return { - 'fail' : fail, - 'skip' : skip, - 'importorskip' : importorskip, - 'exit' : exit, - } # # pytest plugin hooks @@ -73,7 +67,10 
@@ def runtestprotocol(item, log=True, nextitem=None): rep = call_and_report(item, "setup", log) reports = [rep] if rep.passed: - reports.append(call_and_report(item, "call", log)) + if item.config.option.setupshow: + show_test_item(item) + if not item.config.option.setuponly: + reports.append(call_and_report(item, "call", log)) reports.append(call_and_report(item, "teardown", log, nextitem=nextitem)) # after all teardown hooks have been called @@ -83,6 +80,16 @@ def runtestprotocol(item, log=True, nextitem=None): item.funcargs = None return reports +def show_test_item(item): + """Show test function, parameters and the fixtures of the test item.""" + tw = item.config.get_terminal_writer() + tw.line() + tw.write(' ' * 8) + tw.write(item._nodeid) + used_fixtures = sorted(item._fixtureinfo.name2fixturedefs.keys()) + if used_fixtures: + tw.write(' (fixtures used: {0})'.format(', '.join(used_fixtures))) + def pytest_runtest_setup(item): item.session._setupstate.prepare(item) @@ -198,6 +205,36 @@ def get_sections(self, prefix): if name.startswith(prefix): yield prefix, content + @property + def longreprtext(self): + """ + Read-only property that returns the full string representation + of ``longrepr``. + + .. versionadded:: 3.0 + """ + tw = py.io.TerminalWriter(stringio=True) + tw.hasmarkup = False + self.toterminal(tw) + exc = tw.stringio.getvalue() + return exc.strip() + + @property + def capstdout(self): + """Return captured text from stdout, if capturing is enabled + + .. versionadded:: 3.0 + """ + return ''.join(content for (prefix, content) in self.get_sections('Captured stdout')) + + @property + def capstderr(self): + """Return captured text from stderr, if capturing is enabled + + .. 
versionadded:: 3.0 + """ + return ''.join(content for (prefix, content) in self.get_sections('Captured stderr')) + passed = property(lambda x: x.outcome == "passed") failed = property(lambda x: x.outcome == "failed") skipped = property(lambda x: x.outcome == "skipped") @@ -219,7 +256,7 @@ def pytest_runtest_makereport(item, call): if not isinstance(excinfo, ExceptionInfo): outcome = "failed" longrepr = excinfo - elif excinfo.errisinstance(pytest.skip.Exception): + elif excinfo.errisinstance(skip.Exception): outcome = "skipped" r = excinfo._getreprcrash() longrepr = (str(r.path), r.lineno, r.message) @@ -263,8 +300,10 @@ def __init__(self, nodeid, location, keywords, outcome, #: one of 'setup', 'call', 'teardown' to indicate runtest phase. self.when = when - #: list of (secname, data) extra information which needs to - #: marshallable + #: list of pairs ``(str, str)`` of extra information which needs to + #: marshallable. Used by pytest to add captured text + #: from ``stdout`` and ``stderr``, but may be used by other plugins + #: to add arbitrary information to reports. 
self.sections = list(sections) #: time it took to run just the test @@ -285,7 +324,9 @@ def __init__(self, longrepr, **extra): self.__dict__.update(extra) def pytest_make_collect_report(collector): - call = CallInfo(collector._memocollect, "memocollect") + call = CallInfo( + lambda: list(collector.collect()), + 'collect') longrepr = None if not call.excinfo: outcome = "passed" @@ -447,10 +488,16 @@ class Skipped(OutcomeException): # in order to have Skipped exception printing shorter/nicer __module__ = 'builtins' + def __init__(self, msg=None, pytrace=True, allow_module_level=False): + OutcomeException.__init__(self, msg=msg, pytrace=pytrace) + self.allow_module_level = allow_module_level + + class Failed(OutcomeException): """ raised from an explicit call to pytest.fail() """ __module__ = 'builtins' + class Exit(KeyboardInterrupt): """ raised for immediate program exits (no tracebacks/summaries)""" def __init__(self, msg="unknown reason"): @@ -464,8 +511,10 @@ def exit(msg): __tracebackhide__ = True raise Exit(msg) + exit.Exception = Exit + def skip(msg=""): """ skip an executing test with the given message. Note: it's usually better to use the pytest.mark.skipif marker to declare a test to be @@ -474,8 +523,11 @@ def skip(msg=""): """ __tracebackhide__ = True raise Skipped(msg=msg) + + skip.Exception = Skipped + def fail(msg="", pytrace=True): """ explicitly fail an currently-executing test with the given Message. @@ -484,6 +536,8 @@ def fail(msg="", pytrace=True): """ __tracebackhide__ = True raise Failed(msg=msg, pytrace=pytrace) + + fail.Exception = Failed @@ -492,12 +546,23 @@ def importorskip(modname, minversion=None): __version__ attribute. If no minversion is specified the a skip is only triggered if the module can not be imported. 
""" + import warnings __tracebackhide__ = True compile(modname, '', 'eval') # to catch syntaxerrors - try: - __import__(modname) - except ImportError: - skip("could not import %r" %(modname,)) + should_skip = False + + with warnings.catch_warnings(): + # make sure to ignore ImportWarnings that might happen because + # of existing directories with the same name we're trying to + # import but without a __init__.py file + warnings.simplefilter('ignore') + try: + __import__(modname) + except ImportError: + # Do not raise chained exception here(#1485) + should_skip = True + if should_skip: + raise Skipped("could not import %r" %(modname,), allow_module_level=True) mod = sys.modules[modname] if minversion is None: return mod @@ -506,10 +571,10 @@ def importorskip(modname, minversion=None): try: from pkg_resources import parse_version as pv except ImportError: - skip("we have a required version for %r but can not import " - "no pkg_resources to parse version strings." %(modname,)) + raise Skipped("we have a required version for %r but can not import " + "pkg_resources to parse version strings." 
% (modname,), + allow_module_level=True) if verattr is None or pv(verattr) < pv(minversion): - skip("module %r has __version__ %r, required is: %r" %( - modname, verattr, minversion)) + raise Skipped("module %r has __version__ %r, required is: %r" %( + modname, verattr, minversion), allow_module_level=True) return mod - diff --git a/third_party/python/pytest/_pytest/setuponly.py b/third_party/python/pytest/_pytest/setuponly.py new file mode 100644 index 0000000000000..15e195ad5a11a --- /dev/null +++ b/third_party/python/pytest/_pytest/setuponly.py @@ -0,0 +1,74 @@ +from __future__ import absolute_import, division, print_function + +import pytest +import sys + + +def pytest_addoption(parser): + group = parser.getgroup("debugconfig") + group.addoption('--setuponly', '--setup-only', action="store_true", + help="only setup fixtures, do not execute tests.") + group.addoption('--setupshow', '--setup-show', action="store_true", + help="show setup of fixtures while executing tests.") + + +@pytest.hookimpl(hookwrapper=True) +def pytest_fixture_setup(fixturedef, request): + yield + config = request.config + if config.option.setupshow: + if hasattr(request, 'param'): + # Save the fixture parameter so ._show_fixture_action() can + # display it now and during the teardown (in .finish()). 
+ if fixturedef.ids: + if callable(fixturedef.ids): + fixturedef.cached_param = fixturedef.ids(request.param) + else: + fixturedef.cached_param = fixturedef.ids[ + request.param_index] + else: + fixturedef.cached_param = request.param + _show_fixture_action(fixturedef, 'SETUP') + + +def pytest_fixture_post_finalizer(fixturedef): + if hasattr(fixturedef, "cached_result"): + config = fixturedef._fixturemanager.config + if config.option.setupshow: + _show_fixture_action(fixturedef, 'TEARDOWN') + if hasattr(fixturedef, "cached_param"): + del fixturedef.cached_param + + +def _show_fixture_action(fixturedef, msg): + config = fixturedef._fixturemanager.config + capman = config.pluginmanager.getplugin('capturemanager') + if capman: + out, err = capman.suspendcapture() + + tw = config.get_terminal_writer() + tw.line() + tw.write(' ' * 2 * fixturedef.scopenum) + tw.write('{step} {scope} {fixture}'.format( + step=msg.ljust(8), # align the output to TEARDOWN + scope=fixturedef.scope[0].upper(), + fixture=fixturedef.argname)) + + if msg == 'SETUP': + deps = sorted(arg for arg in fixturedef.argnames if arg != 'request') + if deps: + tw.write(' (fixtures used: {0})'.format(', '.join(deps))) + + if hasattr(fixturedef, 'cached_param'): + tw.write('[{0}]'.format(fixturedef.cached_param)) + + if capman: + capman.resumecapture() + sys.stdout.write(out) + sys.stderr.write(err) + + +@pytest.hookimpl(tryfirst=True) +def pytest_cmdline_main(config): + if config.option.setuponly: + config.option.setupshow = True diff --git a/third_party/python/pytest/_pytest/setupplan.py b/third_party/python/pytest/_pytest/setupplan.py new file mode 100644 index 0000000000000..e11bd40698b77 --- /dev/null +++ b/third_party/python/pytest/_pytest/setupplan.py @@ -0,0 +1,25 @@ +from __future__ import absolute_import, division, print_function + +import pytest + + +def pytest_addoption(parser): + group = parser.getgroup("debugconfig") + group.addoption('--setupplan', '--setup-plan', action="store_true", + 
help="show what fixtures and tests would be executed but " + "don't execute anything.") + + +@pytest.hookimpl(tryfirst=True) +def pytest_fixture_setup(fixturedef, request): + # Will return a dummy fixture if the setuponly option is provided. + if request.config.option.setupplan: + fixturedef.cached_result = (None, None, None) + return fixturedef.cached_result + + +@pytest.hookimpl(tryfirst=True) +def pytest_cmdline_main(config): + if config.option.setupplan: + config.option.setuponly = True + config.option.setupshow = True diff --git a/third_party/python/pytest/_pytest/skipping.py b/third_party/python/pytest/_pytest/skipping.py index 18e038d2c8497..5af1ca4040431 100644 --- a/third_party/python/pytest/_pytest/skipping.py +++ b/third_party/python/pytest/_pytest/skipping.py @@ -1,12 +1,14 @@ """ support for skip/xfail functions and markers. """ +from __future__ import absolute_import, division, print_function + import os import sys import traceback import py -import pytest +from _pytest.config import hookimpl from _pytest.mark import MarkInfo, MarkDecorator - +from _pytest.runner import fail, skip def pytest_addoption(parser): group = parser.getgroup("general") @@ -23,10 +25,14 @@ def pytest_addoption(parser): def pytest_configure(config): if config.option.runxfail: + # yay a hack + import pytest old = pytest.xfail config._cleanup.append(lambda: setattr(pytest, "xfail", old)) + def nop(*args, **kwargs): pass + nop.Exception = XFailed setattr(pytest, "xfail", nop) @@ -44,7 +50,7 @@ def nop(*args, **kwargs): ) config.addinivalue_line("markers", "xfail(condition, reason=None, run=True, raises=None, strict=False): " - "mark the the test function as an expected failure if eval(condition) " + "mark the test function as an expected failure if eval(condition) " "has a True value. Optionally specify a reason for better reporting " "and run=False if you don't even want to execute the test function. 
" "If only specific exception(s) are expected, you can list them in " @@ -53,11 +59,7 @@ def nop(*args, **kwargs): ) -def pytest_namespace(): - return dict(xfail=xfail) - - -class XFailed(pytest.fail.Exception): +class XFailed(fail.Exception): """ raised from an explicit call to pytest.xfail() """ @@ -65,6 +67,8 @@ def xfail(reason=""): """ xfail an executing test or setup functions with the given reason.""" __tracebackhide__ = True raise XFailed(reason) + + xfail.Exception = XFailed @@ -96,52 +100,47 @@ def istrue(self): except Exception: self.exc = sys.exc_info() if isinstance(self.exc[1], SyntaxError): - msg = [" " * (self.exc[1].offset + 4) + "^",] + msg = [" " * (self.exc[1].offset + 4) + "^", ] msg.append("SyntaxError: invalid syntax") else: msg = traceback.format_exception_only(*self.exc[:2]) - pytest.fail("Error evaluating %r expression\n" - " %s\n" - "%s" - %(self.name, self.expr, "\n".join(msg)), - pytrace=False) + fail("Error evaluating %r expression\n" + " %s\n" + "%s" + % (self.name, self.expr, "\n".join(msg)), + pytrace=False) def _getglobals(self): d = {'os': os, 'sys': sys, 'config': self.item.config} - func = self.item.obj - try: - d.update(func.__globals__) - except AttributeError: - d.update(func.func_globals) + if hasattr(self.item, 'obj'): + d.update(self.item.obj.__globals__) return d def _istrue(self): if hasattr(self, 'result'): return self.result if self.holder: - d = self._getglobals() if self.holder.args or 'condition' in self.holder.kwargs: self.result = False # "holder" might be a MarkInfo or a MarkDecorator; only # MarkInfo keeps track of all parameters it received in an # _arglist attribute - if hasattr(self.holder, '_arglist'): - arglist = self.holder._arglist - else: - arglist = [(self.holder.args, self.holder.kwargs)] - for args, kwargs in arglist: + marks = getattr(self.holder, '_marks', None) \ + or [self.holder.mark] + for _, args, kwargs in marks: if 'condition' in kwargs: args = (kwargs['condition'],) for expr in args: 
self.expr = expr if isinstance(expr, py.builtin._basestring): + d = self._getglobals() result = cached_eval(self.item.config, expr, d) else: if "reason" not in kwargs: # XXX better be checked at collection time msg = "you need to specify reason=STRING " \ "when using booleans as conditions." - pytest.fail(msg) + fail(msg) result = bool(expr) if result: self.result = True @@ -165,7 +164,7 @@ def getexplanation(self): return expl -@pytest.hookimpl(tryfirst=True) +@hookimpl(tryfirst=True) def pytest_runtest_setup(item): # Check if skip or skipif are specified as pytest marks @@ -174,23 +173,23 @@ def pytest_runtest_setup(item): eval_skipif = MarkEvaluator(item, 'skipif') if eval_skipif.istrue(): item._evalskip = eval_skipif - pytest.skip(eval_skipif.getexplanation()) + skip(eval_skipif.getexplanation()) skip_info = item.keywords.get('skip') if isinstance(skip_info, (MarkInfo, MarkDecorator)): item._evalskip = True if 'reason' in skip_info.kwargs: - pytest.skip(skip_info.kwargs['reason']) + skip(skip_info.kwargs['reason']) elif skip_info.args: - pytest.skip(skip_info.args[0]) + skip(skip_info.args[0]) else: - pytest.skip("unconditional skip") + skip("unconditional skip") item._evalxfail = MarkEvaluator(item, 'xfail') check_xfail_no_run(item) -@pytest.mark.hookwrapper +@hookimpl(hookwrapper=True) def pytest_pyfunc_call(pyfuncitem): check_xfail_no_run(pyfuncitem) outcome = yield @@ -205,7 +204,7 @@ def check_xfail_no_run(item): evalxfail = item._evalxfail if evalxfail.istrue(): if not evalxfail.get('run', True): - pytest.xfail("[NOTRUN] " + evalxfail.getexplanation()) + xfail("[NOTRUN] " + evalxfail.getexplanation()) def check_strict_xfail(pyfuncitem): @@ -217,10 +216,10 @@ def check_strict_xfail(pyfuncitem): if is_strict_xfail: del pyfuncitem._evalxfail explanation = evalxfail.getexplanation() - pytest.fail('[XPASS(strict)] ' + explanation, pytrace=False) + fail('[XPASS(strict)] ' + explanation, pytrace=False) -@pytest.hookimpl(hookwrapper=True) 
+@hookimpl(hookwrapper=True) def pytest_runtest_makereport(item, call): outcome = yield rep = outcome.get_result() @@ -228,12 +227,19 @@ def pytest_runtest_makereport(item, call): evalskip = getattr(item, '_evalskip', None) # unitttest special case, see setting of _unexpectedsuccess if hasattr(item, '_unexpectedsuccess') and rep.when == "call": - # we need to translate into how pytest encodes xpass - rep.wasxfail = "reason: " + repr(item._unexpectedsuccess) - rep.outcome = "failed" + from _pytest.compat import _is_unittest_unexpected_success_a_failure + if item._unexpectedsuccess: + rep.longrepr = "Unexpected success: {0}".format(item._unexpectedsuccess) + else: + rep.longrepr = "Unexpected success" + if _is_unittest_unexpected_success_a_failure(): + rep.outcome = "failed" + else: + rep.outcome = "passed" + rep.wasxfail = rep.longrepr elif item.config.option.runxfail: pass # don't interefere - elif call.excinfo and call.excinfo.errisinstance(pytest.xfail.Exception): + elif call.excinfo and call.excinfo.errisinstance(xfail.Exception): rep.wasxfail = "reason: " + call.excinfo.value.msg rep.outcome = "skipped" elif evalxfail and not rep.skipped and evalxfail.wasvalid() and \ @@ -245,8 +251,15 @@ def pytest_runtest_makereport(item, call): rep.outcome = "skipped" rep.wasxfail = evalxfail.getexplanation() elif call.when == "call": - rep.outcome = "failed" # xpass outcome - rep.wasxfail = evalxfail.getexplanation() + strict_default = item.config.getini('xfail_strict') + is_strict_xfail = evalxfail.get('strict', strict_default) + explanation = evalxfail.getexplanation() + if is_strict_xfail: + rep.outcome = "failed" + rep.longrepr = "[XPASS(strict)] {0}".format(explanation) + else: + rep.outcome = "passed" + rep.wasxfail = explanation elif evalskip is not None and rep.skipped and type(rep.longrepr) is tuple: # skipped by mark.skipif; change the location of the failure # to point to the item definition, otherwise it will display @@ -260,7 +273,7 @@ def 
pytest_report_teststatus(report): if hasattr(report, "wasxfail"): if report.skipped: return "xfailed", "x", "xfail" - elif report.failed: + elif report.passed: return "xpassed", "X", ("XPASS", {'yellow': True}) # called by the terminalreporter instance/plugin @@ -294,12 +307,14 @@ def pytest_terminal_summary(terminalreporter): for line in lines: tr._tw.line(line) + def show_simple(terminalreporter, lines, stat, format): failed = terminalreporter.stats.get(stat) if failed: for rep in failed: pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid) - lines.append(format %(pos,)) + lines.append(format % (pos,)) + def show_xfailed(terminalreporter, lines): xfailed = terminalreporter.stats.get("xfailed") @@ -311,13 +326,15 @@ def show_xfailed(terminalreporter, lines): if reason: lines.append(" " + str(reason)) + def show_xpassed(terminalreporter, lines): xpassed = terminalreporter.stats.get("xpassed") if xpassed: for rep in xpassed: pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid) reason = rep.wasxfail - lines.append("XPASS %s %s" %(pos, reason)) + lines.append("XPASS %s %s" % (pos, reason)) + def cached_eval(config, expr, d): if not hasattr(config, '_evalcache'): @@ -342,6 +359,7 @@ def folded_skips(skipped): l.append((len(events),) + key) return l + def show_skipped(terminalreporter, lines): tr = terminalreporter skipped = tr.stats.get('skipped', []) @@ -357,5 +375,6 @@ def show_skipped(terminalreporter, lines): for num, fspath, lineno, reason in fskips: if reason.startswith("Skipped: "): reason = reason[9:] - lines.append("SKIP [%d] %s:%d: %s" % + lines.append( + "SKIP [%d] %s:%d: %s" % (num, fspath, lineno, reason)) diff --git a/third_party/python/pytest/_pytest/standalonetemplate.py b/third_party/python/pytest/_pytest/standalonetemplate.py deleted file mode 100755 index 484d5d1b25f0d..0000000000000 --- a/third_party/python/pytest/_pytest/standalonetemplate.py +++ /dev/null @@ -1,89 +0,0 @@ -#! /usr/bin/env python - -# Hi There! 
-# You may be wondering what this giant blob of binary data here is, you might -# even be worried that we're up to something nefarious (good for you for being -# paranoid!). This is a base64 encoding of a zip file, this zip file contains -# a fully functional basic pytest script. -# -# Pytest is a thing that tests packages, pytest itself is a package that some- -# one might want to install, especially if they're looking to run tests inside -# some package they want to install. Pytest has a lot of code to collect and -# execute tests, and other such sort of "tribal knowledge" that has been en- -# coded in its code base. Because of this we basically include a basic copy -# of pytest inside this blob. We do this because it let's you as a maintainer -# or application developer who wants people who don't deal with python much to -# easily run tests without installing the complete pytest package. -# -# If you're wondering how this is created: you can create it yourself if you -# have a complete pytest installation by using this command on the command- -# line: ``py.test --genscript=runtests.py``. 
- -sources = """ -@SOURCES@""" - -import sys -import base64 -import zlib - -class DictImporter(object): - def __init__(self, sources): - self.sources = sources - - def find_module(self, fullname, path=None): - if fullname == "argparse" and sys.version_info >= (2,7): - # we were generated with = (3, 0): - exec("def do_exec(co, loc): exec(co, loc)\n") - import pickle - sources = sources.encode("ascii") # ensure bytes - sources = pickle.loads(zlib.decompress(base64.decodebytes(sources))) - else: - import cPickle as pickle - exec("def do_exec(co, loc): exec co in loc\n") - sources = pickle.loads(zlib.decompress(base64.decodestring(sources))) - - importer = DictImporter(sources) - sys.meta_path.insert(0, importer) - entry = "@ENTRY@" - do_exec(entry, locals()) # noqa diff --git a/third_party/python/pytest/_pytest/terminal.py b/third_party/python/pytest/_pytest/terminal.py index 825f553ef2cd6..af89d0fc2b201 100644 --- a/third_party/python/pytest/_pytest/terminal.py +++ b/third_party/python/pytest/_pytest/terminal.py @@ -2,6 +2,9 @@ This is a good source for looking at the various reporting hooks. """ +from __future__ import absolute_import, division, print_function + +import itertools from _pytest.main import EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, \ EXIT_USAGEERROR, EXIT_NOTESTSCOLLECTED import pytest @@ -20,16 +23,18 @@ def pytest_addoption(parser): group._addoption('-q', '--quiet', action="count", dest="quiet", default=0, help="decrease verbosity."), group._addoption('-r', - action="store", dest="reportchars", default=None, metavar="chars", + action="store", dest="reportchars", default='', metavar="chars", help="show extra test summary info as specified by chars (f)ailed, " - "(E)error, (s)skipped, (x)failed, (X)passed (w)pytest-warnings " - "(p)passed, (P)passed with output, (a)all except pP.") + "(E)error, (s)skipped, (x)failed, (X)passed, " + "(p)passed, (P)passed with output, (a)all except pP. 
" + "Warnings are displayed at all times except when " + "--disable-warnings is set") + group._addoption('--disable-warnings', '--disable-pytest-warnings', default=False, + dest='disable_warnings', action='store_true', + help='disable warnings summary') group._addoption('-l', '--showlocals', action="store_true", dest="showlocals", default=False, help="show locals in tracebacks (disabled by default).") - group._addoption('--report', - action="store", dest="report", default=None, metavar="opts", - help="(deprecated, use -r)") group._addoption('--tb', metavar="style", action="store", dest="tbstyle", default='auto', choices=['auto', 'long', 'short', 'no', 'line', 'native'], @@ -54,18 +59,11 @@ def mywriter(tags, args): def getreportopt(config): reportopts = "" - optvalue = config.option.report - if optvalue: - py.builtin.print_("DEPRECATED: use -r instead of --report option.", - file=sys.stderr) - if optvalue: - for setting in optvalue.split(","): - setting = setting.strip() - if setting == "skipped": - reportopts += "s" - elif setting == "xfailed": - reportopts += "x" reportchars = config.option.reportchars + if not config.option.disable_warnings and 'w' not in reportchars: + reportchars += 'w' + elif config.option.disable_warnings and 'w' in reportchars: + reportchars = reportchars.replace('w', '') if reportchars: for char in reportchars: if char not in reportopts and char != 'a': @@ -85,13 +83,40 @@ def pytest_report_teststatus(report): letter = "f" return report.outcome, letter, report.outcome.upper() + class WarningReport: + """ + Simple structure to hold warnings information captured by ``pytest_logwarning``. + """ def __init__(self, code, message, nodeid=None, fslocation=None): + """ + :param code: unused + :param str message: user friendly message about the warning + :param str|None nodeid: node id that generated the warning (see ``get_location``). 
+ :param tuple|py.path.local fslocation: + file system location of the source of the warning (see ``get_location``). + """ self.code = code self.message = message self.nodeid = nodeid self.fslocation = fslocation + def get_location(self, config): + """ + Returns the more user-friendly information about the location + of a warning, or None. + """ + if self.nodeid: + return self.nodeid + if self.fslocation: + if isinstance(self.fslocation, tuple) and len(self.fslocation) >= 2: + filename, linenum = self.fslocation[:2] + relpath = py.path.local(filename).relto(config.invocation_dir) + return '%s:%s' % (relpath, linenum) + else: + return str(self.fslocation) + return None + class TerminalReporter: def __init__(self, config, file=None): @@ -171,8 +196,6 @@ def pytest_internalerror(self, excrepr): def pytest_logwarning(self, code, fslocation, message, nodeid): warnings = self.stats.setdefault("warnings", []) - if isinstance(fslocation, tuple): - fslocation = "%s:%d" % fslocation warning = WarningReport(code=code, fslocation=fslocation, message=message, nodeid=nodeid) warnings.append(warning) @@ -259,7 +282,7 @@ def report_collect(self, final=False): line = "collected " else: line = "collecting " - line += str(self._numcollected) + " items" + line += str(self._numcollected) + " item" + ('' if self._numcollected == 1 else 's') if errors: line += " / %d errors" % errors if skipped: @@ -300,8 +323,8 @@ def pytest_sessionstart(self, session): def pytest_report_header(self, config): inifile = "" if config.inifile: - inifile = config.rootdir.bestrelpath(config.inifile) - lines = ["rootdir: %s, inifile: %s" %(config.rootdir, inifile)] + inifile = " " + config.rootdir.bestrelpath(config.inifile) + lines = ["rootdir: %s, inifile:%s" % (config.rootdir, inifile)] plugininfo = config.pluginmanager.list_plugin_distinfo() if plugininfo: @@ -366,7 +389,8 @@ def pytest_sessionfinish(self, exitstatus): EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, EXIT_USAGEERROR, EXIT_NOTESTSCOLLECTED) if 
exitstatus in summary_exit_codes: - self.config.hook.pytest_terminal_summary(terminalreporter=self) + self.config.hook.pytest_terminal_summary(terminalreporter=self, + exitstatus=exitstatus) self.summary_errors() self.summary_failures() self.summary_warnings() @@ -442,13 +466,21 @@ def getreports(self, name): def summary_warnings(self): if self.hasopt("w"): - warnings = self.stats.get("warnings") - if not warnings: + all_warnings = self.stats.get("warnings") + if not all_warnings: return - self.write_sep("=", "pytest-warning summary") - for w in warnings: - self._tw.line("W%s %s %s" % (w.code, - w.fslocation, w.message)) + + grouped = itertools.groupby(all_warnings, key=lambda wr: wr.get_location(self.config)) + + self.write_sep("=", "warnings summary", yellow=True, bold=False) + for location, warnings in grouped: + self._tw.line(str(location) or '') + for w in warnings: + lines = w.message.splitlines() + indented = '\n'.join(' ' + x for x in lines) + self._tw.line(indented) + self._tw.line() + self._tw.line('-- Docs: http://doc.pytest.org/en/latest/warnings.html') def summary_passes(self): if self.config.option.tbstyle != "no": @@ -462,6 +494,15 @@ def summary_passes(self): self.write_sep("_", msg) self._outrep_summary(rep) + def print_teardown_sections(self, rep): + for secname, content in rep.sections: + if 'teardown' in secname: + self._tw.sep('-', secname) + if content[-1:] == "\n": + content = content[:-1] + self._tw.line(content) + + def summary_failures(self): if self.config.option.tbstyle != "no": reports = self.getreports('failed') @@ -477,6 +518,9 @@ def summary_failures(self): markup = {'red': True, 'bold': True} self.write_sep("_", msg, **markup) self._outrep_summary(rep) + for report in self.getreports(''): + if report.nodeid == rep.nodeid and report.when == 'teardown': + self.print_teardown_sections(report) def summary_errors(self): if self.config.option.tbstyle != "no": @@ -517,16 +561,8 @@ def summary_stats(self): def summary_deselected(self): if 
'deselected' in self.stats: - l = [] - k = self.config.option.keyword - if k: - l.append("-k%s" % k) - m = self.config.option.markexpr - if m: - l.append("-m %r" % m) - if l: - self.write_sep("=", "%d tests deselected by %r" % ( - len(self.stats['deselected']), " ".join(l)), bold=True) + self.write_sep("=", "%d tests deselected" % ( + len(self.stats['deselected'])), bold=True) def repr_pythonversion(v=None): if v is None: @@ -546,8 +582,7 @@ def flatten(l): def build_summary_stats_line(stats): keys = ("failed passed skipped deselected " - "xfailed xpassed warnings error").split() - key_translation = {'warnings': 'pytest-warnings'} + "xfailed xpassed warnings error").split() unknown_key_seen = False for key in stats.keys(): if key not in keys: @@ -558,8 +593,7 @@ def build_summary_stats_line(stats): for key in keys: val = stats.get(key, None) if val: - key_name = key_translation.get(key, key) - parts.append("%d %s" % (len(val), key_name)) + parts.append("%d %s" % (len(val), key)) if parts: line = ", ".join(parts) diff --git a/third_party/python/pytest/_pytest/tmpdir.py b/third_party/python/pytest/_pytest/tmpdir.py index ebc48dbe5b24b..5960140595d08 100644 --- a/third_party/python/pytest/_pytest/tmpdir.py +++ b/third_party/python/pytest/_pytest/tmpdir.py @@ -1,9 +1,11 @@ """ support for providing temporary directories to test functions. """ +from __future__ import absolute_import, division, print_function + import re import pytest import py -from _pytest.monkeypatch import monkeypatch +from _pytest.monkeypatch import MonkeyPatch class TempdirFactory: @@ -81,6 +83,7 @@ def get_user(): except (ImportError, KeyError): return None + # backward compatibility TempdirHandler = TempdirFactory @@ -92,7 +95,7 @@ def pytest_configure(config): available at pytest_configure time, but ideally should be moved entirely to the tmpdir_factory session fixture. 
""" - mp = monkeypatch() + mp = MonkeyPatch() t = TempdirFactory(config) config._cleanup.extend([mp.undo, t.finish]) mp.setattr(config, '_tmpdirhandler', t, raising=False) @@ -108,14 +111,14 @@ def tmpdir_factory(request): @pytest.fixture def tmpdir(request, tmpdir_factory): - """return a temporary directory path object + """Return a temporary directory path object which is unique to each test function invocation, created as a sub directory of the base temporary directory. The returned object is a `py.path.local`_ path object. """ name = request.node.name - name = re.sub("[\W]", "_", name) + name = re.sub(r"[\W]", "_", name) MAXVAL = 30 if len(name) > MAXVAL: name = name[:MAXVAL] diff --git a/third_party/python/pytest/_pytest/unittest.py b/third_party/python/pytest/_pytest/unittest.py index 8120e94fbf41b..0cf0f1726af05 100644 --- a/third_party/python/pytest/_pytest/unittest.py +++ b/third_party/python/pytest/_pytest/unittest.py @@ -1,14 +1,15 @@ """ discovery and running of std-library "unittest" style tests. 
""" -from __future__ import absolute_import +from __future__ import absolute_import, division, print_function import sys import traceback -import pytest -# for transfering markers +# for transferring markers import _pytest._code -from _pytest.python import transfer_markers -from _pytest.skipping import MarkEvaluator +from _pytest.config import hookimpl +from _pytest.runner import fail, skip +from _pytest.python import transfer_markers, Class, Module, Function +from _pytest.skipping import MarkEvaluator, xfail def pytest_pycollect_makeitem(collector, name, obj): @@ -22,11 +23,11 @@ def pytest_pycollect_makeitem(collector, name, obj): return UnitTestCase(name, parent=collector) -class UnitTestCase(pytest.Class): +class UnitTestCase(Class): # marker for fixturemanger.getfixtureinfo() # to declare that our children do not support funcargs nofuncargs = True - + def setup(self): cls = self.obj if getattr(cls, '__unittest_skip__', False): @@ -46,10 +47,12 @@ def collect(self): return self.session._fixturemanager.parsefactories(self, unittest=True) loader = TestLoader() - module = self.getparent(pytest.Module).obj + module = self.getparent(Module).obj foundsomething = False for name in loader.getTestCaseNames(self.obj): x = getattr(self.obj, name) + if not getattr(x, '__test__', True): + continue funcobj = getattr(x, 'im_func', x) transfer_markers(funcobj, cls, module) yield TestCaseFunction(name, parent=self) @@ -63,8 +66,7 @@ def collect(self): yield TestCaseFunction('runTest', parent=self) - -class TestCaseFunction(pytest.Function): +class TestCaseFunction(Function): _excinfo = None def setup(self): @@ -92,6 +94,9 @@ def _fix_unittest_skip_decorator(self): def teardown(self): if hasattr(self._testcase, 'teardown_method'): self._testcase.teardown_method(self._obj) + # Allow garbage collection on TestCase instance attributes. 
+ self._testcase = None + self._obj = None def startTest(self, testcase): pass @@ -106,36 +111,37 @@ def _addexcinfo(self, rawexcinfo): try: l = traceback.format_exception(*rawexcinfo) l.insert(0, "NOTE: Incompatible Exception Representation, " - "displaying natively:\n\n") - pytest.fail("".join(l), pytrace=False) - except (pytest.fail.Exception, KeyboardInterrupt): + "displaying natively:\n\n") + fail("".join(l), pytrace=False) + except (fail.Exception, KeyboardInterrupt): raise except: - pytest.fail("ERROR: Unknown Incompatible Exception " - "representation:\n%r" %(rawexcinfo,), pytrace=False) + fail("ERROR: Unknown Incompatible Exception " + "representation:\n%r" % (rawexcinfo,), pytrace=False) except KeyboardInterrupt: raise - except pytest.fail.Exception: + except fail.Exception: excinfo = _pytest._code.ExceptionInfo() self.__dict__.setdefault('_excinfo', []).append(excinfo) def addError(self, testcase, rawexcinfo): self._addexcinfo(rawexcinfo) + def addFailure(self, testcase, rawexcinfo): self._addexcinfo(rawexcinfo) def addSkip(self, testcase, reason): try: - pytest.skip(reason) - except pytest.skip.Exception: + skip(reason) + except skip.Exception: self._evalskip = MarkEvaluator(self, 'SkipTest') self._evalskip.result = True self._addexcinfo(sys.exc_info()) def addExpectedFailure(self, testcase, rawexcinfo, reason=""): try: - pytest.xfail(str(reason)) - except pytest.xfail.Exception: + xfail(str(reason)) + except xfail.Exception: self._addexcinfo(sys.exc_info()) def addUnexpectedSuccess(self, testcase, reason=""): @@ -147,17 +153,42 @@ def addSuccess(self, testcase): def stopTest(self, testcase): pass + def _handle_skip(self): + # implements the skipping machinery (see #2137) + # analog to pythons Lib/unittest/case.py:run + testMethod = getattr(self._testcase, self._testcase._testMethodName) + if (getattr(self._testcase.__class__, "__unittest_skip__", False) or + getattr(testMethod, "__unittest_skip__", False)): + # If the class or method was skipped. 
+ skip_why = (getattr(self._testcase.__class__, '__unittest_skip_why__', '') or + getattr(testMethod, '__unittest_skip_why__', '')) + try: # PY3, unittest2 on PY2 + self._testcase._addSkip(self, self._testcase, skip_why) + except TypeError: # PY2 + if sys.version_info[0] != 2: + raise + self._testcase._addSkip(self, skip_why) + return True + return False + def runtest(self): - self._testcase(result=self) + if self.config.pluginmanager.get_plugin("pdbinvoke") is None: + self._testcase(result=self) + else: + # disables tearDown and cleanups for post mortem debugging (see #1890) + if self._handle_skip(): + return + self._testcase.debug() def _prunetraceback(self, excinfo): - pytest.Function._prunetraceback(self, excinfo) + Function._prunetraceback(self, excinfo) traceback = excinfo.traceback.filter( - lambda x:not x.frame.f_globals.get('__unittest')) + lambda x: not x.frame.f_globals.get('__unittest')) if traceback: excinfo.traceback = traceback -@pytest.hookimpl(tryfirst=True) + +@hookimpl(tryfirst=True) def pytest_runtest_makereport(item, call): if isinstance(item, TestCaseFunction): if item._excinfo: @@ -169,13 +200,15 @@ def pytest_runtest_makereport(item, call): # twisted trial support -@pytest.hookimpl(hookwrapper=True) + +@hookimpl(hookwrapper=True) def pytest_runtest_protocol(item): if isinstance(item, TestCaseFunction) and \ 'twisted.trial.unittest' in sys.modules: ut = sys.modules['twisted.python.failure'] Failure__init__ = ut.Failure.__init__ check_testcase_implements_trial_reporter() + def excstore(self, exc_value=None, exc_type=None, exc_tb=None, captureVars=None): if exc_value is None: @@ -189,6 +222,7 @@ def excstore(self, exc_value=None, exc_type=None, exc_tb=None, captureVars=captureVars) except TypeError: Failure__init__(self, exc_value, exc_type, exc_tb) + ut.Failure.__init__ = excstore yield ut.Failure.__init__ = Failure__init__ diff --git a/third_party/python/pytest/_pytest/vendored_packages/README.md 
b/third_party/python/pytest/_pytest/vendored_packages/README.md deleted file mode 100644 index eab7c714fb01e..0000000000000 --- a/third_party/python/pytest/_pytest/vendored_packages/README.md +++ /dev/null @@ -1,13 +0,0 @@ -This directory vendors the `pluggy` module. - -For a more detailed discussion for the reasons to vendoring this -package, please see [this issue](https://github.com/pytest-dev/pytest/issues/944). - -To update the current version, execute: - -``` -$ pip install -U pluggy== --no-compile --target=_pytest/vendored_packages -``` - -And commit the modified files. The `pluggy-.dist-info` directory -created by `pip` should be ignored. diff --git a/third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/DESCRIPTION.rst b/third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/DESCRIPTION.rst deleted file mode 100644 index aa3bbf8129755..0000000000000 --- a/third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/DESCRIPTION.rst +++ /dev/null @@ -1,10 +0,0 @@ -Plugin registration and hook calling for Python -=============================================== - -This is the plugin manager as used by pytest but stripped -of pytest specific details. - -During the 0.x series this plugin does not have much documentation -except extensive docstrings in the pluggy.py module. 
- - diff --git a/third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/METADATA b/third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/METADATA deleted file mode 100644 index ec81f0a6be0ae..0000000000000 --- a/third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/METADATA +++ /dev/null @@ -1,39 +0,0 @@ -Metadata-Version: 2.0 -Name: pluggy -Version: 0.3.1 -Summary: plugin and hook calling mechanisms for python -Home-page: UNKNOWN -Author: Holger Krekel -Author-email: holger at merlinux.eu -License: MIT license -Platform: unix -Platform: linux -Platform: osx -Platform: win32 -Classifier: Development Status :: 4 - Beta -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: MIT License -Classifier: Operating System :: POSIX -Classifier: Operating System :: Microsoft :: Windows -Classifier: Operating System :: MacOS :: MacOS X -Classifier: Topic :: Software Development :: Testing -Classifier: Topic :: Software Development :: Libraries -Classifier: Topic :: Utilities -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.6 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 - -Plugin registration and hook calling for Python -=============================================== - -This is the plugin manager as used by pytest but stripped -of pytest specific details. - -During the 0.x series this plugin does not have much documentation -except extensive docstrings in the pluggy.py module. 
- - diff --git a/third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/RECORD b/third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/RECORD deleted file mode 100644 index 9626673c43ce2..0000000000000 --- a/third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/RECORD +++ /dev/null @@ -1,8 +0,0 @@ -pluggy.py,sha256=v_RfWzyW6DPU1cJu_EFoL_OHq3t13qloVdR6UaMCXQA,29862 -pluggy-0.3.1.dist-info/top_level.txt,sha256=xKSCRhai-v9MckvMuWqNz16c1tbsmOggoMSwTgcpYHE,7 -pluggy-0.3.1.dist-info/pbr.json,sha256=xX3s6__wOcAyF-AZJX1sdZyW6PUXT-FkfBlM69EEUCg,47 -pluggy-0.3.1.dist-info/RECORD,, -pluggy-0.3.1.dist-info/metadata.json,sha256=nLKltOT78dMV-00uXD6Aeemp4xNsz2q59j6ORSDeLjw,1027 -pluggy-0.3.1.dist-info/METADATA,sha256=1b85Ho2u4iK30M099k7axMzcDDhLcIMb-A82JUJZnSo,1334 -pluggy-0.3.1.dist-info/WHEEL,sha256=AvR0WeTpDaxT645bl5FQxUK6NPsTls2ttpcGJg3j1Xg,110 -pluggy-0.3.1.dist-info/DESCRIPTION.rst,sha256=P5Akh1EdIBR6CeqtV2P8ZwpGSpZiTKPw0NyS7jEiD-g,306 diff --git a/third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/WHEEL b/third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/WHEEL deleted file mode 100644 index 9dff69d86102d..0000000000000 --- a/third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.24.0) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/metadata.json b/third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/metadata.json deleted file mode 100644 index 426a3a7ade148..0000000000000 --- a/third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"license": "MIT license", "name": "pluggy", "metadata_version": "2.0", "generator": "bdist_wheel (0.24.0)", "summary": "plugin and hook calling 
mechanisms for python", "platform": "unix", "version": "0.3.1", "extensions": {"python.details": {"document_names": {"description": "DESCRIPTION.rst"}, "contacts": [{"role": "author", "email": "holger at merlinux.eu", "name": "Holger Krekel"}]}}, "classifiers": ["Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: POSIX", "Operating System :: Microsoft :: Windows", "Operating System :: MacOS :: MacOS X", "Topic :: Software Development :: Testing", "Topic :: Software Development :: Libraries", "Topic :: Utilities", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5"]} \ No newline at end of file diff --git a/third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/pbr.json b/third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/pbr.json deleted file mode 100644 index d6b7986401959..0000000000000 --- a/third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/pbr.json +++ /dev/null @@ -1 +0,0 @@ -{"is_release": false, "git_version": "7d4c9cd"} \ No newline at end of file diff --git a/third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/top_level.txt b/third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/top_level.txt deleted file mode 100644 index 11bdb5c1f5fcd..0000000000000 --- a/third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -pluggy diff --git a/third_party/python/pytest/_pytest/vendored_packages/pluggy.py b/third_party/python/pytest/_pytest/vendored_packages/pluggy.py index 2f848b23d35ed..aebddad01daea 100644 --- a/third_party/python/pytest/_pytest/vendored_packages/pluggy.py +++ 
b/third_party/python/pytest/_pytest/vendored_packages/pluggy.py @@ -67,8 +67,9 @@ import sys import inspect -__version__ = '0.3.1' -__all__ = ["PluginManager", "PluginValidationError", +__version__ = '0.4.0' + +__all__ = ["PluginManager", "PluginValidationError", "HookCallError", "HookspecMarker", "HookimplMarker"] _py3 = sys.version_info > (3, 0) @@ -308,7 +309,7 @@ class PluginManager(object): """ Core Pluginmanager class which manages registration of plugin objects and 1:N hook calling. - You can register new hooks by calling ``addhooks(module_or_class)``. + You can register new hooks by calling ``add_hookspec(module_or_class)``. You can register plugin objects (which contain hooks) by calling ``register(plugin)``. The Pluginmanager is initialized with a prefix that is searched for in the names of the dict of registered @@ -374,7 +375,10 @@ def register(self, plugin, name=None): def parse_hookimpl_opts(self, plugin, name): method = getattr(plugin, name) - res = getattr(method, self.project_name + "_impl", None) + try: + res = getattr(method, self.project_name + "_impl", None) + except Exception: + res = {} if res is not None and not isinstance(res, dict): # false positive res = None @@ -455,6 +459,10 @@ def get_plugin(self, name): """ Return a plugin or None for the given name. """ return self._name2plugin.get(name) + def has_plugin(self, name): + """ Return True if a plugin with the given name is registered. """ + return self.get_plugin(name) is not None + def get_name(self, plugin): """ Return name for registered plugin or None if not registered. """ for name, val in self._name2plugin.items(): @@ -492,7 +500,8 @@ def check_pending(self): def load_setuptools_entrypoints(self, entrypoint_name): """ Load modules from querying the specified setuptools entrypoint name. Return the number of loaded plugins. 
""" - from pkg_resources import iter_entry_points, DistributionNotFound + from pkg_resources import (iter_entry_points, DistributionNotFound, + VersionConflict) for ep in iter_entry_points(entrypoint_name): # is the plugin registered or blocked? if self.get_plugin(ep.name) or self.is_blocked(ep.name): @@ -501,6 +510,9 @@ def load_setuptools_entrypoints(self, entrypoint_name): plugin = ep.load() except DistributionNotFound: continue + except VersionConflict as e: + raise PluginValidationError( + "Plugin %r could not be loaded: %s!" % (ep.name, e)) self.register(plugin, name=ep.name) self._plugin_distinfo.append((plugin, ep.dist)) return len(self._plugin_distinfo) @@ -528,7 +540,7 @@ def add_hookcall_monitoring(self, before, after): of HookImpl instances and the keyword arguments for the hook call. ``after(outcome, hook_name, hook_impls, kwargs)`` receives the - same arguments as ``before`` but also a :py:class:`_CallOutcome`` object + same arguments as ``before`` but also a :py:class:`_CallOutcome <_pytest.vendored_packages.pluggy._CallOutcome>` object which represents the result of the overall hook call. """ return _TracedHookExecution(self, before, after).undo @@ -573,7 +585,7 @@ class _MultiCall: # XXX note that the __multicall__ argument is supported only # for pytest compatibility reasons. It was never officially - # supported there and is explicitly deprecated since 2.8 + # supported there and is explicitely deprecated since 2.8 # so we can remove it soon, allowing to avoid the below recursion # in execute() and simplify/speed up the execute loop. 
@@ -590,7 +602,13 @@ def execute(self): while self.hook_impls: hook_impl = self.hook_impls.pop() - args = [all_kwargs[argname] for argname in hook_impl.argnames] + try: + args = [all_kwargs[argname] for argname in hook_impl.argnames] + except KeyError: + for argname in hook_impl.argnames: + if argname not in all_kwargs: + raise HookCallError( + "hook call must provide argument %r" % (argname,)) if hook_impl.hookwrapper: return _wrapped_call(hook_impl.function(*args), self.execute) res = hook_impl.function(*args) @@ -629,7 +647,10 @@ def varnames(func, startindex=None): startindex = 1 else: if not inspect.isfunction(func) and not inspect.ismethod(func): - func = getattr(func, '__call__', func) + try: + func = getattr(func, '__call__', func) + except Exception: + return () if startindex is None: startindex = int(inspect.ismethod(func)) @@ -763,6 +784,10 @@ class PluginValidationError(Exception): """ plugin failed validation. """ +class HookCallError(Exception): + """ Hook was called wrongly. """ + + if hasattr(inspect, 'signature'): def _formatdef(func): return "%s%s" % ( diff --git a/third_party/python/pytest/_pytest/warnings.py b/third_party/python/pytest/_pytest/warnings.py new file mode 100644 index 0000000000000..4fe28bd315b1f --- /dev/null +++ b/third_party/python/pytest/_pytest/warnings.py @@ -0,0 +1,88 @@ +from __future__ import absolute_import, division, print_function + +import warnings +from contextlib import contextmanager + +import pytest + +from _pytest import compat + + +def _setoption(wmod, arg): + """ + Copy of the warning._setoption function but does not escape arguments. 
+ """ + parts = arg.split(':') + if len(parts) > 5: + raise wmod._OptionError("too many fields (max 5): %r" % (arg,)) + while len(parts) < 5: + parts.append('') + action, message, category, module, lineno = [s.strip() + for s in parts] + action = wmod._getaction(action) + category = wmod._getcategory(category) + if lineno: + try: + lineno = int(lineno) + if lineno < 0: + raise ValueError + except (ValueError, OverflowError): + raise wmod._OptionError("invalid lineno %r" % (lineno,)) + else: + lineno = 0 + wmod.filterwarnings(action, message, category, module, lineno) + + +def pytest_addoption(parser): + group = parser.getgroup("pytest-warnings") + group.addoption( + '-W', '--pythonwarnings', action='append', + help="set which warnings to report, see -W option of python itself.") + parser.addini("filterwarnings", type="linelist", + help="Each line specifies warning filter pattern which would be passed" + "to warnings.filterwarnings. Process after -W and --pythonwarnings.") + + +@contextmanager +def catch_warnings_for_item(item): + """ + catches the warnings generated during setup/call/teardown execution + of the given item and after it is done posts them as warnings to this + item. 
+ """ + args = item.config.getoption('pythonwarnings') or [] + inifilters = item.config.getini("filterwarnings") + with warnings.catch_warnings(record=True) as log: + for arg in args: + warnings._setoption(arg) + + for arg in inifilters: + _setoption(warnings, arg) + + yield + + for warning in log: + warn_msg = warning.message + unicode_warning = False + + if compat._PY2 and any(isinstance(m, compat.UNICODE_TYPES) for m in warn_msg.args): + new_args = [compat.safe_str(m) for m in warn_msg.args] + unicode_warning = warn_msg.args != new_args + warn_msg.args = new_args + + msg = warnings.formatwarning( + warn_msg, warning.category, + warning.filename, warning.lineno, warning.line) + item.warn("unused", msg) + + if unicode_warning: + warnings.warn( + "Warning is using unicode non convertible to ascii, " + "converting to a safe representation:\n %s" % msg, + UnicodeWarning) + + +@pytest.hookimpl(hookwrapper=True) +def pytest_runtest_protocol(item): + with catch_warnings_for_item(item): + yield diff --git a/third_party/python/pytest/pytest.py b/third_party/python/pytest/pytest.py index e376e417e8a33..4e4ccb32dd4c1 100644 --- a/third_party/python/pytest/pytest.py +++ b/third_party/python/pytest/pytest.py @@ -2,6 +2,32 @@ """ pytest: unit and functional testing with Python. 
""" + + +# else we are imported + +from _pytest.config import ( + main, UsageError, _preloadplugins, cmdline, + hookspec, hookimpl +) +from _pytest.fixtures import fixture, yield_fixture +from _pytest.assertion import register_assert_rewrite +from _pytest.freeze_support import freeze_includes +from _pytest import __version__ +from _pytest.debugging import pytestPDB as __pytestPDB +from _pytest.recwarn import warns, deprecated_call +from _pytest.runner import fail, skip, importorskip, exit +from _pytest.mark import MARK_GEN as mark, param +from _pytest.skipping import xfail +from _pytest.main import Item, Collector, File, Session +from _pytest.fixtures import fillfixtures as _fillfuncargs +from _pytest.python import ( + raises, approx, + Module, Class, Instance, Function, Generator, +) + +set_trace = __pytestPDB.set_trace + __all__ = [ 'main', 'UsageError', @@ -9,20 +35,44 @@ 'hookspec', 'hookimpl', '__version__', + 'register_assert_rewrite', + 'freeze_includes', + 'set_trace', + 'warns', + 'deprecated_call', + 'fixture', + 'yield_fixture', + 'fail', + 'skip', + 'xfail', + 'importorskip', + 'exit', + 'mark', + 'param', + 'approx', + '_fillfuncargs', + + 'Item', + 'File', + 'Collector', + 'Session', + 'Module', + 'Class', + 'Instance', + 'Function', + 'Generator', + 'raises', + + ] -if __name__ == '__main__': # if run as a script or by 'python -m pytest' +if __name__ == '__main__': + # if run as a script or by 'python -m pytest' # we trigger the below "else" condition by the following import import pytest raise SystemExit(pytest.main()) +else: -# else we are imported - -from _pytest.config import ( - main, UsageError, _preloadplugins, cmdline, - hookspec, hookimpl -) -from _pytest import __version__ - -_preloadplugins() # to populate pytest.* namespace so help(pytest) works - + from _pytest.compat import _setup_collect_fakemodule + _preloadplugins() # to populate pytest.* namespace so help(pytest) works + _setup_collect_fakemodule() diff --git 
a/third_party/python/pytest/setup.cfg b/third_party/python/pytest/setup.cfg deleted file mode 100644 index bec4469e08396..0000000000000 --- a/third_party/python/pytest/setup.cfg +++ /dev/null @@ -1,19 +0,0 @@ -[build_sphinx] -source-dir = doc/en/ -build-dir = doc/build -all_files = 1 - -[upload_sphinx] -upload-dir = doc/en/build/html - -[bdist_wheel] -universal = 1 - -[devpi:upload] -formats = sdist.tgz,bdist_wheel - -[egg_info] -tag_build = -tag_date = 0 -tag_svn_revision = 0 - diff --git a/third_party/python/pytest/setup.py b/third_party/python/pytest/setup.py deleted file mode 100644 index 7cdcdfb9927ac..0000000000000 --- a/third_party/python/pytest/setup.py +++ /dev/null @@ -1,122 +0,0 @@ -import os, sys -import setuptools -import pkg_resources -from setuptools import setup, Command - -classifiers = ['Development Status :: 6 - Mature', - 'Intended Audience :: Developers', - 'License :: OSI Approved :: MIT License', - 'Operating System :: POSIX', - 'Operating System :: Microsoft :: Windows', - 'Operating System :: MacOS :: MacOS X', - 'Topic :: Software Development :: Testing', - 'Topic :: Software Development :: Libraries', - 'Topic :: Utilities'] + [ - ('Programming Language :: Python :: %s' % x) for x in - '2 2.6 2.7 3 3.2 3.3 3.4 3.5'.split()] - -with open('README.rst') as fd: - long_description = fd.read() - -def get_version(): - p = os.path.join(os.path.dirname( - os.path.abspath(__file__)), "_pytest", "__init__.py") - with open(p) as f: - for line in f.readlines(): - if "__version__" in line: - return line.strip().split("=")[-1].strip(" '") - raise ValueError("could not read version") - - -def has_environment_marker_support(): - """ - Tests that setuptools has support for PEP-426 environment marker support. 
- - The first known release to support it is 0.7 (and the earliest on PyPI seems to be 0.7.2 - so we're using that), see: http://pythonhosted.org/setuptools/history.html#id142 - - References: - - * https://wheel.readthedocs.io/en/latest/index.html#defining-conditional-dependencies - * https://www.python.org/dev/peps/pep-0426/#environment-markers - """ - try: - return pkg_resources.parse_version(setuptools.__version__) >= pkg_resources.parse_version('0.7.2') - except Exception as exc: - sys.stderr.write("Could not test setuptool's version: %s\n" % exc) - return False - - -def main(): - install_requires = ['py>=1.4.29'] # pluggy is vendored in _pytest.vendored_packages - extras_require = {} - if has_environment_marker_support(): - extras_require[':python_version=="2.6" or python_version=="3.0" or python_version=="3.1"'] = ['argparse'] - extras_require[':sys_platform=="win32"'] = ['colorama'] - else: - if sys.version_info < (2, 7) or (3,) <= sys.version_info < (3, 2): - install_requires.append('argparse') - if sys.platform == 'win32': - install_requires.append('colorama') - - setup( - name='pytest', - description='pytest: simple powerful testing with Python', - long_description=long_description, - version=get_version(), - url='http://pytest.org', - license='MIT license', - platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], - author='Holger Krekel, Bruno Oliveira, Ronny Pfannschmidt, Floris Bruynooghe, Brianna Laugher, Florian Bruhin and others', - author_email='holger at merlinux.eu', - entry_points=make_entry_points(), - classifiers=classifiers, - cmdclass={'test': PyTest}, - # the following should be enabled for release - install_requires=install_requires, - extras_require=extras_require, - packages=['_pytest', '_pytest.assertion', '_pytest._code', '_pytest.vendored_packages'], - py_modules=['pytest'], - zip_safe=False, - ) - - -def cmdline_entrypoints(versioninfo, platform, basename): - target = 'pytest:main' - if platform.startswith('java'): - points = 
{'py.test-jython': target} - else: - if basename.startswith('pypy'): - points = {'py.test-%s' % basename: target} - else: # cpython - points = {'py.test-%s.%s' % versioninfo[:2] : target} - points['py.test'] = target - return points - - -def make_entry_points(): - basename = os.path.basename(sys.executable) - points = cmdline_entrypoints(sys.version_info, sys.platform, basename) - keys = list(points.keys()) - keys.sort() - l = ['%s = %s' % (x, points[x]) for x in keys] - return {'console_scripts': l} - - -class PyTest(Command): - user_options = [] - def initialize_options(self): - pass - def finalize_options(self): - pass - def run(self): - import subprocess - PPATH = [x for x in os.environ.get('PYTHONPATH', '').split(':') if x] - PPATH.insert(0, os.getcwd()) - os.environ['PYTHONPATH'] = ':'.join(PPATH) - errno = subprocess.call([sys.executable, 'pytest.py', '--ignore=doc']) - raise SystemExit(errno) - - -if __name__ == '__main__': - main() From 261016cb16e29245279b0c5000f0acd0fed5bbc9 Mon Sep 17 00:00:00 2001 From: John Dorlus Date: Thu, 22 Jun 2017 18:13:14 -0400 Subject: [PATCH 012/152] Bug 1358670 - Added requirements and mozharness script r=ahal Added cli_script attribute to TelemetryClientTests due to test failure Added telemetry test requirements file to /testing/config Added mozharness script to run telemetry tests from checkout MozReview-Commit-ID: AJKM7b1OcVW --HG-- extra : rebase_source : 8147ad3decaa94c28ba48b87310b4a00d5a90fd2 --- .../config/telemetry_tests_requirements.txt | 9 + .../scripts/telemetry/telemetry_client.py | 210 ++++++++++++++++++ 2 files changed, 219 insertions(+) create mode 100644 testing/config/telemetry_tests_requirements.txt create mode 100755 testing/mozharness/scripts/telemetry/telemetry_client.py diff --git a/testing/config/telemetry_tests_requirements.txt b/testing/config/telemetry_tests_requirements.txt new file mode 100644 index 0000000000000..297ffec6a3d8b --- /dev/null +++ b/testing/config/telemetry_tests_requirements.txt 
@@ -0,0 +1,9 @@ +-r mozbase_requirements.txt + +../marionette/client/ +../marionette/harness/ + +# Allows to use the Puppeteer page object model for Firefox +../marionette/puppeteer/firefox/ + +../../toolkit/components/telemetry/tests/marionette/harness \ No newline at end of file diff --git a/testing/mozharness/scripts/telemetry/telemetry_client.py b/testing/mozharness/scripts/telemetry/telemetry_client.py new file mode 100755 index 0000000000000..88a95f02e89bb --- /dev/null +++ b/testing/mozharness/scripts/telemetry/telemetry_client.py @@ -0,0 +1,210 @@ +#!/usr/bin/env python +# ***** BEGIN LICENSE BLOCK ***** +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this file, +# You can obtain one at http://mozilla.org/MPL/2.0/. +# ***** END LICENSE BLOCK ***** + + +import copy +import os +import sys + +# load modules from parent dir +sys.path.insert(1, os.path.dirname(os.path.dirname(sys.path[0]))) + +GECKO_SRCDIR = os.path.join(os.path.expanduser('~'), 'checkouts', 'gecko') + +TELEMETRY_TEST_HOME = os.path.join(GECKO_SRCDIR, 'toolkit', 'components', 'telemetry', + 'tests', 'marionette') + +from mozharness.base.python import PostScriptRun, PreScriptAction +from mozharness.mozilla.structuredlog import StructuredOutputParser +from mozharness.mozilla.testing.testbase import ( + TestingMixin, + testing_config_options, +) +from mozharness.mozilla.testing.codecoverage import ( + CodeCoverageMixin, + code_coverage_config_options +) +from mozharness.mozilla.vcstools import VCSToolsScript + +# General command line arguments for Firefox ui tests +telemetry_tests_config_options = [ + [["--allow-software-gl-layers"], { + "action": "store_true", + "dest": "allow_software_gl_layers", + "default": False, + "help": "Permits a software GL implementation (such as LLVMPipe) to use the GL compositor.", + }], + [["--enable-webrender"], { + "action": "store_true", + "dest": "enable_webrender", + 
"default": False, + "help": "Tries to enable the WebRender compositor.", + }], + [['--dry-run'], { + 'dest': 'dry_run', + 'default': False, + 'help': 'Only show what was going to be tested.', + }], + [["--e10s"], { + 'dest': 'e10s', + 'action': 'store_true', + 'default': False, + 'help': 'Enable multi-process (e10s) mode when running tests.', + }], + [['--symbols-path=SYMBOLS_PATH'], { + 'dest': 'symbols_path', + 'help': 'absolute path to directory containing breakpad ' + 'symbols, or the url of a zip file containing symbols.', + }], + [['--tag=TAG'], { + 'dest': 'tag', + 'help': 'Subset of tests to run (local, remote).', + }], +] + copy.deepcopy(testing_config_options) \ + + copy.deepcopy(code_coverage_config_options) + + +class TelemetryTests(TestingMixin, VCSToolsScript, CodeCoverageMixin): + def __init__(self, config_options=None, + all_actions=None, default_actions=None, + *args, **kwargs): + config_options = config_options or telemetry_tests_config_options + actions = [ + 'clobber', + 'download-and-extract', + 'create-virtualenv', + 'install', + 'run-tests', + 'uninstall', + ] + + super(TelemetryTests, self).__init__( + config_options=config_options, + all_actions=all_actions or actions, + default_actions=default_actions or actions, + *args, **kwargs) + + # Code which doesn't run on buildbot has to include the following properties + self.binary_path = self.config.get('binary_path') + self.installer_path = self.config.get('installer_path') + self.installer_url = self.config.get('installer_url') + self.test_packages_url = self.config.get('test_packages_url') + self.test_url = self.config.get('test_url') + + if not self.test_url and not self.test_packages_url: + self.fatal( + 'You must use --test-url, or --test-packages-url') + + @PreScriptAction('create-virtualenv') + def _pre_create_virtualenv(self, action): + dirs = self.query_abs_dirs() + + requirements = os.path.join(GECKO_SRCDIR, 'testing', + 'config', 'telemetry_tests_requirements.txt') + 
self.register_virtualenv_module(requirements=[requirements], two_pass=True) + + def query_abs_dirs(self): + if self.abs_dirs: + return self.abs_dirs + + abs_dirs = super(TelemetryTests, self).query_abs_dirs() + + dirs = { + 'abs_blob_upload_dir': os.path.join(abs_dirs['abs_work_dir'], 'blobber_upload_dir'), + 'abs_telemetry_dir': TELEMETRY_TEST_HOME, + } + + for key in dirs: + if key not in abs_dirs: + abs_dirs[key] = dirs[key] + self.abs_dirs = abs_dirs + + return self.abs_dirs + + def run_test(self, binary_path, env=None, marionette_port=2828): + """All required steps for running the tests against an installer.""" + dirs = self.query_abs_dirs() + + # Import the harness to retrieve the location of the cli scripts + import telemetry_harness + + cmd = [ + self.query_python_path(), + os.path.join(os.path.dirname(telemetry_harness.__file__), + self.cli_script), + '--binary', binary_path, + '--address', 'localhost:{}'.format(marionette_port), + + # Resource files to serve via local webserver + '--server-root', os.path.join(dirs['abs_telemetry_dir'], 'harness', 'www'), + # Use the work dir to get temporary data stored + '--workspace', dirs['abs_work_dir'], + # logging options + '--gecko-log=-', # output from the gecko process redirected to stdout + '--log-raw=-', # structured log for output parser redirected to stdout + # additional reports helpful for Jenkins and inpection via Treeherder + '--log-html', os.path.join(dirs['abs_blob_upload_dir'], 'report.html'), + '--log-xunit', os.path.join(dirs['abs_blob_upload_dir'], 'report.xml'), + # Enable tracing output to log transmission protocol + '-vv', + ] + + parser = StructuredOutputParser(config=self.config, + log_obj=self.log_obj, + strict=False) + + # Add the default tests to run + tests = [os.path.join(dirs['abs_telemetry_dir'], 'tests', test) for test in self.default_tests] + cmd.extend(tests) + + # Set further environment settings + env = env or self.query_env() + env.update({'MINIDUMP_SAVE_PATH': 
dirs['abs_blob_upload_dir']}) + if self.query_minidump_stackwalk(): + env.update({'MINIDUMP_STACKWALK': self.minidump_stackwalk_path}) + env['RUST_BACKTRACE'] = '1' + + # If code coverage is enabled, set GCOV_PREFIX env variable + if self.config.get('code_coverage'): + env['GCOV_PREFIX'] = self.gcov_dir + + return_code = self.run_command(cmd, + cwd=dirs['abs_work_dir'], + output_timeout=300, + output_parser=parser, + env=env) + + tbpl_status, log_level = parser.evaluate_parser(return_code) + self.buildbot_status(tbpl_status, level=log_level) + + return return_code + + @PreScriptAction('run-tests') + def _pre_run_tests(self, action): + if not self.installer_path and not self.installer_url: + self.critical('Please specify an installer via --installer-path or --installer-url.') + sys.exit(1) + + def run_tests(self): + """Run all the tests""" + return self.run_test( + binary_path=self.binary_path, + env=self.query_env(), + ) + + +class TelemetryClientTests(TelemetryTests): + cli_script = 'runtests.py' + default_tests = [ + os.path.join('client', 'manifest.ini'), + os.path.join('unit', 'manifest.ini'), + ] + + +if __name__ == '__main__': + myScript = TelemetryClientTests() + myScript.run_and_exit() From bcf6ef6a17908dbf7db84ecb9c206a2d78fb0ef3 Mon Sep 17 00:00:00 2001 From: John Dorlus Date: Thu, 22 Jun 2017 18:16:29 -0400 Subject: [PATCH 013/152] Bug 1358670 - add telemetry-harness jobs to CI r=dustin MozReview-Commit-ID: 9tPAMBAkvCs Added config via tests.yml, test-sets.yml Added remove_installer to config for linux. Added blank for windows as that will come later. 
--HG-- extra : rebase_source : 06a454308010909512cb4e9e8106d851dd34cfae --- taskcluster/ci/test/test-sets.yml | 1 + taskcluster/ci/test/tests.yml | 18 ++++++++++++++++++ taskcluster/taskgraph/transforms/task.py | 2 ++ 3 files changed, 21 insertions(+) diff --git a/taskcluster/ci/test/test-sets.yml b/taskcluster/ci/test/test-sets.yml index 1e23873200158..fed49d0e31263 100644 --- a/taskcluster/ci/test/test-sets.yml +++ b/taskcluster/ci/test/test-sets.yml @@ -36,6 +36,7 @@ common-tests: - mochitest-webgl - reftest - reftest-no-accel + - telemetry-tests-client - xpcshell web-platform-tests: diff --git a/taskcluster/ci/test/tests.yml b/taskcluster/ci/test/tests.yml index 32c698a26725e..ce8f13534fbfb 100644 --- a/taskcluster/ci/test/tests.yml +++ b/taskcluster/ci/test/tests.yml @@ -1509,6 +1509,24 @@ talos-tp5o: - --add-option - --webServer,localhost +telemetry-tests-client: + description: "Telemetry tests client run" + suite: telemetry-tests-client + treeherder-symbol: tc-e10s + max-run-time: 5400 + checkout: true + e10s: true + tier: 2 + docker-image: {"in-tree": "desktop1604-test"} + mozharness: + script: telemetry/telemetry_client.py + config: + by-test-platform: + linux.*: + - remove_executables.py + windows.*: [] + + web-platform-tests: description: "Web platform test run" suite: web-platform-tests diff --git a/taskcluster/taskgraph/transforms/task.py b/taskcluster/taskgraph/transforms/task.py index f8fd73be0e5b7..020336e02fde0 100644 --- a/taskcluster/taskgraph/transforms/task.py +++ b/taskcluster/taskgraph/transforms/task.py @@ -444,6 +444,8 @@ 'tc-R-e10s': 'Reftests executed by TaskCluster with e10s', 'tc-T': 'Talos performance tests executed by TaskCluster', 'tc-T-e10s': 'Talos performance tests executed by TaskCluster with e10s', + 'tc-tt-c': 'Telemetry client marionette tests', + 'tc-tt-c-e10s': 'Telemetry client marionette tests with e10s', 'tc-SY-e10s': 'Are we slim yet tests by TaskCluster with e10s', 'tc-VP': 'VideoPuppeteer tests executed by 
TaskCluster', 'tc-W': 'Web platform tests executed by TaskCluster', From 9dd142d73fd701837ce3c077c8a380cd7618198e Mon Sep 17 00:00:00 2001 From: John Dorlus Date: Thu, 22 Jun 2017 18:18:24 -0400 Subject: [PATCH 014/152] Bug 1358670 - Implemented MarionetteHarness in telemetry-harness r=whimboo Added changes to testcase.py for change server root Added testvar to pass server_root from command line of telemetry-harness to testcase class. Added argument.py and runner.py to implement MarionetteHarness in the test job Changed test name in manifest.ini MozReview-Commit-ID: 4XpBvq6Skz6 --HG-- extra : rebase_source : ab3f9d8ce95186fb9fba0eda0c4624eed837f298 --- .../tests/marionette/harness/MANIFEST.in | 2 ++ .../tests/marionette/harness/__init__.py | 3 --- .../harness/telemetry_harness/runner.py | 20 +++++++++++++++++++ .../harness/telemetry_harness/runtests.py | 7 ++++--- .../harness/telemetry_harness/testcase.py | 20 +++++++++---------- .../marionette/tests/client/manifest.ini | 2 +- 6 files changed, 37 insertions(+), 17 deletions(-) create mode 100644 toolkit/components/telemetry/tests/marionette/harness/MANIFEST.in delete mode 100644 toolkit/components/telemetry/tests/marionette/harness/__init__.py create mode 100644 toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/runner.py diff --git a/toolkit/components/telemetry/tests/marionette/harness/MANIFEST.in b/toolkit/components/telemetry/tests/marionette/harness/MANIFEST.in new file mode 100644 index 0000000000000..5e9d175afc7f5 --- /dev/null +++ b/toolkit/components/telemetry/tests/marionette/harness/MANIFEST.in @@ -0,0 +1,2 @@ +exclude MANIFEST.in +include requirements.txt \ No newline at end of file diff --git a/toolkit/components/telemetry/tests/marionette/harness/__init__.py b/toolkit/components/telemetry/tests/marionette/harness/__init__.py deleted file mode 100644 index 6fbe8159b2db1..0000000000000 --- a/toolkit/components/telemetry/tests/marionette/harness/__init__.py +++ /dev/null @@ -1,3 +0,0 
@@ -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. diff --git a/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/runner.py b/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/runner.py new file mode 100644 index 0000000000000..99a1568154f02 --- /dev/null +++ b/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/runner.py @@ -0,0 +1,20 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +from marionette_harness import BaseMarionetteTestRunner +from testcase import TelemetryTestCase + + +class TelemetryTestRunner(BaseMarionetteTestRunner): + + def __init__(self, **kwargs): + super(TelemetryTestRunner, self).__init__(**kwargs) + self.server_root = kwargs.pop('server_root') + self.testvars['server_root'] = self.server_root + + # select the appropriate GeckoInstance + self.app = 'fxdesktop' + + self.test_handlers = [TelemetryTestCase] diff --git a/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/runtests.py b/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/runtests.py index 7d14ef2f14c71..9d9e2af1de1c6 100755 --- a/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/runtests.py +++ b/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/runtests.py @@ -3,11 +3,12 @@ # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
from marionette_harness.runtests import cli as mn_cli -from testcase import TelemetryTestCase +from runner import TelemetryTestRunner -def cli(): - mn_cli(testcase_class=TelemetryTestCase) +def cli(args=None): + mn_cli(runner_class=TelemetryTestRunner, + args=args) if __name__ == '__main__': diff --git a/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/testcase.py b/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/testcase.py index 1d55fc6998367..d55f19026a18c 100755 --- a/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/testcase.py +++ b/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/testcase.py @@ -16,20 +16,17 @@ from marionette_harness import MarionetteTestCase from marionette_harness.runner import httpd -here = os.path.abspath(os.path.dirname(__file__)) -doc_root = os.path.join(os.path.dirname(here), "www") -resources_dir = os.path.join(os.path.dirname(here), "resources") - class TelemetryTestCase(PuppeteerMixin, MarionetteTestCase): ping_list = [] - def setUp(self, *args, **kwargs): - super(TelemetryTestCase, self).setUp() + def __init__(self, *args, **kwargs): + super(TelemetryTestCase, self).__init__(*args, **kwargs) - # Start and configure server - self.httpd = httpd.FixtureServer(doc_root) + def setUp(self, *args, **kwargs): + super(TelemetryTestCase, self).setUp(*args, **kwargs) + self.httpd = httpd.FixtureServer(self.testvars['server_root']) ping_route = [("POST", re.compile('/pings'), self.pings)] self.httpd.routes.extend(ping_route) self.httpd.start() @@ -51,9 +48,11 @@ def setUp(self, *args, **kwargs): self.marionette.enforce_gecko_prefs(telemetry_prefs) def wait_for_ping(self, ping_filter_func): + current_ping_list_size = len(self.ping_list) if len(self.ping_list) == 0: try: - Wait(self.marionette, 60).until(lambda t: len(self.ping_list) > 0) + Wait(self.marionette, 60).until(lambda _: + len(self.ping_list) > current_ping_list_size) except Exception as e: 
self.fail('Error generating ping: {}'.format(e.message)) @@ -81,7 +80,8 @@ def _install_addon(self): # Developed by: MozillaOnline # Addon URL: https://addons.mozilla.org/en-US/firefox/addon/easyscreenshot/ try: - addon_path = os.path.join(resources_dir, 'easyscreenshot.xpi') + # TODO: Replace Resources_dir with default directory + addon_path = os.path.join('resources_dir', 'easyscreenshot.xpi') addons = Addons(self.marionette) addons.install(addon_path) except MarionetteException as e: diff --git a/toolkit/components/telemetry/tests/marionette/tests/client/manifest.ini b/toolkit/components/telemetry/tests/marionette/tests/client/manifest.ini index 2cd6999d1e5f0..58e5bda7434f6 100644 --- a/toolkit/components/telemetry/tests/marionette/tests/client/manifest.ini +++ b/toolkit/components/telemetry/tests/marionette/tests/client/manifest.ini @@ -1,4 +1,4 @@ [DEFAULT] tags = client -[test_main_ping_addon_install_tab_window_scalars.py] \ No newline at end of file +[test_main_tab_scalars.py] \ No newline at end of file From 310427becca308786355b0a48c1a79e932fed833 Mon Sep 17 00:00:00 2001 From: Julian Descottes Date: Tue, 18 Jul 2017 23:41:03 +0200 Subject: [PATCH 015/152] Bug 1374555 - update HTMLTooltip stylesheets to use more specific selectors;r=gl Scoped stylesheets will stop being supported for chrome content per Bug 1345702. Update the selectors in the stylesheets so that they can easily be loaded without the scope attribute. 
MozReview-Commit-ID: FV4tWD4SlYA --HG-- extra : rebase_source : 63f2fce76cb6f1e7052e6e421a256c0ea8f77852 --- .../client/shared/widgets/cubic-bezier.css | 45 +++++++------ .../client/shared/widgets/filter-widget.css | 67 ++++++++++--------- devtools/client/shared/widgets/mdn-docs.css | 2 +- 3 files changed, 58 insertions(+), 56 deletions(-) diff --git a/devtools/client/shared/widgets/cubic-bezier.css b/devtools/client/shared/widgets/cubic-bezier.css index 203fe336a6e48..7a808dc86222f 100644 --- a/devtools/client/shared/widgets/cubic-bezier.css +++ b/devtools/client/shared/widgets/cubic-bezier.css @@ -15,7 +15,7 @@ box-sizing: border-box; } -.display-wrap { +.cubic-bezier-container .display-wrap { width: 50%; height: 100%; text-align: center; @@ -24,14 +24,14 @@ /* Coordinate Plane */ -.coordinate-plane { +.cubic-bezier-container .coordinate-plane { width: 150px; height: 370px; margin: 0 auto; position: relative; } -.control-point { +.cubic-bezier-container .control-point { position: absolute; z-index: 1; height: 10px; @@ -46,7 +46,7 @@ cursor: pointer; } -.display-wrap { +.cubic-bezier-container .display-wrap { background: repeating-linear-gradient(0deg, transparent, @@ -66,7 +66,7 @@ -moz-user-select: none; } -canvas.curve { +.cubic-bezier-container canvas.curve { background: linear-gradient(-45deg, transparent 49.7%, @@ -79,14 +79,14 @@ canvas.curve { /* Timing Function Preview Widget */ -.timing-function-preview { +.cubic-bezier-container .timing-function-preview { position: absolute; bottom: 20px; right: 45px; width: 150px; } -.timing-function-preview .scale { +.cubic-bezier-container .timing-function-preview .scale { position: absolute; top: 6px; left: 0; @@ -98,7 +98,7 @@ canvas.curve { background: #ccc; } -.timing-function-preview .dot { +.cubic-bezier-container .timing-function-preview .dot { position: absolute; top: 0; left: -7px; @@ -114,7 +114,7 @@ canvas.curve { /* Preset Widget */ -.preset-pane { +.cubic-bezier-container .preset-pane { width: 50%; 
height: 100%; border-right: 1px solid var(--theme-splitter-color); @@ -134,7 +134,7 @@ canvas.curve { border-right: none; } -.category { +.cubic-bezier-container .category { padding: 5px 0px; width: 33.33%; text-align: center; @@ -146,16 +146,16 @@ canvas.curve { overflow: hidden; } -.category:hover { +.cubic-bezier-container .category:hover { background-color: var(--theme-tab-toolbar-background); } -.active-category { +.cubic-bezier-container .active-category { background-color: var(--theme-selection-background); color: var(--theme-selection-color); } -.active-category:hover { +.cubic-bezier-container .active-category:hover { background-color: var(--theme-selection-background); } @@ -166,25 +166,25 @@ canvas.curve { overflow-y: auto; } -.preset-list { +.cubic-bezier-container .preset-list { display: none; padding-top: 6px; } -.active-preset-list { +.cubic-bezier-container .active-preset-list { display: flex; flex-wrap: wrap; justify-content: flex-start; } -.preset { +.cubic-bezier-container .preset { cursor: pointer; width: 33.33%; margin: 5px 0px; text-align: center; } -.preset canvas { +.cubic-bezier-container .preset canvas { display: block; border: 1px solid var(--theme-splitter-color); border-radius: 3px; @@ -192,7 +192,7 @@ canvas.curve { margin: 0 auto; } -.preset p { +.cubic-bezier-container .preset p { font-size: 80%; margin: 2px auto 0px auto; color: var(--theme-body-color-alt); @@ -201,16 +201,17 @@ canvas.curve { overflow: hidden; } -.active-preset p, .active-preset:hover p { +.cubic-bezier-container .active-preset p, +.cubic-bezier-container .active-preset:hover p { color: var(--theme-body-color); } -.preset:hover canvas { +.cubic-bezier-container .preset:hover canvas { border-color: var(--theme-selection-background); } -.active-preset canvas, -.active-preset:hover canvas { +.cubic-bezier-container .active-preset canvas, +.cubic-bezier-container .active-preset:hover canvas { background-color: var(--theme-selection-background-semitransparent); 
border-color: var(--theme-selection-background); } diff --git a/devtools/client/shared/widgets/filter-widget.css b/devtools/client/shared/widgets/filter-widget.css index d015cb5b15c7c..714a95694ab1c 100644 --- a/devtools/client/shared/widgets/filter-widget.css +++ b/devtools/client/shared/widgets/filter-widget.css @@ -19,21 +19,21 @@ -moz-user-select: none; } -.filters-list, -.presets-list { +#filter-container .filters-list, +#filter-container .presets-list { display: flex; flex-direction: column; box-sizing: border-box; } -.filters-list { +#filter-container .filters-list { /* Allow the filters list to take the full width when the presets list is hidden */ flex-grow: 1; padding: 0 6px; } -.presets-list { +#filter-container .presets-list { /* Make sure that when the presets list is shown, it has a fixed width */ width: 200px; padding-left: 6px; @@ -55,8 +55,8 @@ /* The list of filters and list of presets should push their footers to the bottom, so they can take as much space as there is */ -#filters, -#presets { +#filter-container #filters, +#filter-container #presets { flex-grow: 1; /* Avoid pushing below the tooltip's area */ overflow-y: auto; @@ -66,47 +66,47 @@ These footers have some input (taking up as much space as possible) and an add button next */ -.footer { +#filter-container .footer { display: flex; margin: 10px 3px; align-items: center; } -.footer :not(button) { +#filter-container .footer :not(button) { flex-grow: 1; margin-right: 3px; } /* Styles for 1 filter function item */ -.filter, -.filter-name, -.filter-value { +#filter-container .filter, +#filter-container .filter-name, +#filter-container .filter-value { display: flex; align-items: center; } -.filter { +#filter-container .filter { margin: 5px 0; } -.filter-name { +#filter-container .filter-name { width: 120px; margin-right: 10px; } -.filter-name label { +#filter-container .filter-name label { -moz-user-select: none; flex-grow: 1; } -.filter-name label.devtools-draglabel { +#filter-container 
.filter-name label.devtools-draglabel { cursor: ew-resize; } /* drag/drop handle */ -.filter-name i { +#filter-container .filter-name i { width: 10px; height: 10px; margin-right: 10px; @@ -121,23 +121,23 @@ background-position: 0 1px; } -.filter-value { +#filter-container .filter-value { min-width: 150px; margin-right: 10px; flex: 1; } -.filter-value input { +#filter-container .filter-value input { flex-grow: 1; } /* Fix the size of inputs */ /* Especially needed on Linux where input are bigger */ -input { +#filter-container input { width: 8em; } -.preset { +#filter-container .preset { display: flex; margin-bottom: 10px; cursor: pointer; @@ -147,31 +147,32 @@ input { flex-wrap: wrap; } -.preset label, -.preset span { +#filter-container .preset label, +#filter-container .preset span { display: flex; align-items: center; } -.preset label { +#filter-container .preset label { flex: 1 0; cursor: pointer; color: var(--theme-body-color); } -.preset:hover { +#filter-container .preset:hover { background: var(--theme-selection-background); } -.preset:hover label, .preset:hover span { +#filter-container .preset:hover label, +#filter-container .preset:hover span { color: var(--theme-selection-color); } -.preset .remove-button { +#filter-container .preset .remove-button { order: 2; } -.preset span { +#filter-container .preset span { flex: 2 100%; white-space: nowrap; overflow: hidden; @@ -181,7 +182,7 @@ input { color: var(--theme-body-color-alt); } -.remove-button { +#filter-container .remove-button { width: 16px; height: 16px; background: url(chrome://devtools/skin/images/close.svg); @@ -191,7 +192,7 @@ input { cursor: pointer; } -.hidden { +#filter-container .hidden { display: none !important; } @@ -207,7 +208,7 @@ input { line-height: 20px; } -.add, +#filter-container .add, #toggle-presets { background-size: cover; border: none; @@ -219,7 +220,7 @@ input { margin: 0 5px; } -.add { +#filter-container .add { background: url(chrome://devtools/skin/images/add.svg); } @@ -227,8 
+228,8 @@ input { background: url(chrome://devtools/skin/images/pseudo-class.svg); } -.add, -.remove-button, +#filter-container .add, +#filter-container .remove-button, #toggle-presets { filter: var(--icon-filter); } diff --git a/devtools/client/shared/widgets/mdn-docs.css b/devtools/client/shared/widgets/mdn-docs.css index e3547489fa8ff..507872f8b4e31 100644 --- a/devtools/client/shared/widgets/mdn-docs.css +++ b/devtools/client/shared/widgets/mdn-docs.css @@ -28,7 +28,7 @@ margin-top: 1em; } -.devtools-throbber { +.mdn-container .devtools-throbber { align-self: center; opacity: 0; } From c24947b9166bea862b50ee462f54b894776f8385 Mon Sep 17 00:00:00 2001 From: Julian Descottes Date: Tue, 18 Jul 2017 23:41:41 +0200 Subject: [PATCH 016/152] Bug 1374555 - import all tooltip widgets stylesheets in tooltips.css;r=gl Now that scoped stylesheets are no longer supported, it doesn't make sense to include a specific copy of an HTMLTooltip stylesheet in the tooltip container. Nothing guarantees that a given stylesheet won't be loaded more than once so having a single one-shot import in tooltips.css is more appropriate. 
MozReview-Commit-ID: 690pGNQdnwy --HG-- extra : rebase_source : 7d7fadebf0fb20984508cbc7643998c505ec2ffd --- .../shared/widgets/tooltip/CssDocsTooltip.js | 3 +-- .../shared/widgets/tooltip/HTMLTooltip.js | 18 ------------------ .../tooltip/SwatchBasedEditorTooltip.js | 5 +---- .../tooltip/SwatchColorPickerTooltip.js | 5 +---- .../tooltip/SwatchCubicBezierTooltip.js | 3 +-- .../widgets/tooltip/SwatchFilterTooltip.js | 3 +-- devtools/client/themes/tooltips.css | 7 +++++++ 7 files changed, 12 insertions(+), 32 deletions(-) diff --git a/devtools/client/shared/widgets/tooltip/CssDocsTooltip.js b/devtools/client/shared/widgets/tooltip/CssDocsTooltip.js index d183a68c0688c..4bc66665e65ae 100644 --- a/devtools/client/shared/widgets/tooltip/CssDocsTooltip.js +++ b/devtools/client/shared/widgets/tooltip/CssDocsTooltip.js @@ -23,8 +23,7 @@ function CssDocsTooltip(toolboxDoc) { type: "arrow", consumeOutsideClicks: true, autofocus: true, - useXulWrapper: true, - stylesheet: "chrome://devtools/content/shared/widgets/mdn-docs.css", + useXulWrapper: true }); this.widget = this.setMdnDocsContent(); this._onVisitLink = this._onVisitLink.bind(this); diff --git a/devtools/client/shared/widgets/tooltip/HTMLTooltip.js b/devtools/client/shared/widgets/tooltip/HTMLTooltip.js index 0480d183ce019..f607c1db69dc1 100644 --- a/devtools/client/shared/widgets/tooltip/HTMLTooltip.js +++ b/devtools/client/shared/widgets/tooltip/HTMLTooltip.js @@ -213,15 +213,12 @@ const getRelativeRect = function (node, relativeTo) { * - {Boolean} useXulWrapper * Defaults to false. If the tooltip is hosted in a XUL document, use a XUL panel * in order to use all the screen viewport available. - * - {String} stylesheet - * Style sheet URL to apply to the tooltip content. 
*/ function HTMLTooltip(toolboxDoc, { type = "normal", autofocus = false, consumeOutsideClicks = true, useXulWrapper = false, - stylesheet = "", } = {}) { EventEmitter.decorate(this); @@ -246,9 +243,6 @@ function HTMLTooltip(toolboxDoc, { this.container = this._createContainer(); - if (stylesheet) { - this._applyStylesheet(stylesheet); - } if (this.useXulWrapper) { // When using a XUL panel as the wrapper, the actual markup for the tooltip is as // follows : @@ -634,16 +628,4 @@ HTMLTooltip.prototype = { top += this.doc.defaultView.mozInnerScreenY; return {top, right: left + width, bottom: top + height, left, width, height}; }, - - /** - * Apply a scoped stylesheet to the container so that this css file only - * applies to it. - */ - _applyStylesheet: function (url) { - let style = this.doc.createElementNS(XHTML_NS, "style"); - style.setAttribute("scoped", "true"); - url = url.replace(/"/g, "\\\""); - style.textContent = `@import url("${url}");`; - this.container.appendChild(style); - } }; diff --git a/devtools/client/shared/widgets/tooltip/SwatchBasedEditorTooltip.js b/devtools/client/shared/widgets/tooltip/SwatchBasedEditorTooltip.js index afb47506adc33..9ce92738cd113 100644 --- a/devtools/client/shared/widgets/tooltip/SwatchBasedEditorTooltip.js +++ b/devtools/client/shared/widgets/tooltip/SwatchBasedEditorTooltip.js @@ -19,12 +19,10 @@ const INLINE_TOOLTIP_CLASS = "inline-tooltip-container"; * The document to attach the SwatchBasedEditorTooltip. This is either the toolbox * document if the tooltip is a popup tooltip or the panel's document if it is an * inline editor. - * @param {String} stylesheet - * The stylesheet to be used for the HTMLTooltip. * @param {Boolean} useInline * A boolean flag representing whether or not the InlineTooltip should be used. 
*/ -function SwatchBasedEditorTooltip(document, stylesheet, useInline) { +function SwatchBasedEditorTooltip(document, useInline) { EventEmitter.decorate(this); this.useInline = useInline; @@ -40,7 +38,6 @@ function SwatchBasedEditorTooltip(document, stylesheet, useInline) { type: "arrow", consumeOutsideClicks: true, useXulWrapper: true, - stylesheet }); } diff --git a/devtools/client/shared/widgets/tooltip/SwatchColorPickerTooltip.js b/devtools/client/shared/widgets/tooltip/SwatchColorPickerTooltip.js index 5e222c0921ebb..e24a7df0f7dbb 100644 --- a/devtools/client/shared/widgets/tooltip/SwatchColorPickerTooltip.js +++ b/devtools/client/shared/widgets/tooltip/SwatchColorPickerTooltip.js @@ -38,10 +38,7 @@ const XHTML_NS = "http://www.w3.org/1999/xhtml"; function SwatchColorPickerTooltip(document, inspector, {supportsCssColor4ColorFunction}) { - let stylesheet = NEW_COLOR_WIDGET ? - "chrome://devtools/content/shared/widgets/color-widget.css" : - "chrome://devtools/content/shared/widgets/spectrum.css"; - SwatchBasedEditorTooltip.call(this, document, stylesheet); + SwatchBasedEditorTooltip.call(this, document); this.inspector = inspector; diff --git a/devtools/client/shared/widgets/tooltip/SwatchCubicBezierTooltip.js b/devtools/client/shared/widgets/tooltip/SwatchCubicBezierTooltip.js index 02f6fbea4e00f..45919e6c0e5f1 100644 --- a/devtools/client/shared/widgets/tooltip/SwatchCubicBezierTooltip.js +++ b/devtools/client/shared/widgets/tooltip/SwatchCubicBezierTooltip.js @@ -26,8 +26,7 @@ const XHTML_NS = "http://www.w3.org/1999/xhtml"; * inline editor. */ function SwatchCubicBezierTooltip(document) { - let stylesheet = "chrome://devtools/content/shared/widgets/cubic-bezier.css"; - SwatchBasedEditorTooltip.call(this, document, stylesheet); + SwatchBasedEditorTooltip.call(this, document); // Creating a cubic-bezier instance. 
// this.widget will always be a promise that resolves to the widget instance diff --git a/devtools/client/shared/widgets/tooltip/SwatchFilterTooltip.js b/devtools/client/shared/widgets/tooltip/SwatchFilterTooltip.js index bc69c3b707430..2461b4aec6aca 100644 --- a/devtools/client/shared/widgets/tooltip/SwatchFilterTooltip.js +++ b/devtools/client/shared/widgets/tooltip/SwatchFilterTooltip.js @@ -28,8 +28,7 @@ const XHTML_NS = "http://www.w3.org/1999/xhtml"; * This can be obtained from "shared/fronts/css-properties.js". */ function SwatchFilterTooltip(document, cssIsValid) { - let stylesheet = "chrome://devtools/content/shared/widgets/filter-widget.css"; - SwatchBasedEditorTooltip.call(this, document, stylesheet); + SwatchBasedEditorTooltip.call(this, document); this._cssIsValid = cssIsValid; // Creating a filter editor instance. diff --git a/devtools/client/themes/tooltips.css b/devtools/client/themes/tooltips.css index 4e37c3a3155fa..e5db9735d8eaa 100644 --- a/devtools/client/themes/tooltips.css +++ b/devtools/client/themes/tooltips.css @@ -3,6 +3,13 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +/* Import stylesheets for specific tooltip widgets */ +@import url(chrome://devtools/content/shared/widgets/color-widget.css); +@import url(chrome://devtools/content/shared/widgets/cubic-bezier.css); +@import url(chrome://devtools/content/shared/widgets/filter-widget.css); +@import url(chrome://devtools/content/shared/widgets/mdn-docs.css); +@import url(chrome://devtools/content/shared/widgets/spectrum.css); + /* Tooltip specific theme variables */ .theme-dark { From 1beceff9cc2d51b858d00e8588bd569bbae220e8 Mon Sep 17 00:00:00 2001 From: Mike Hommey Date: Wed, 19 Jul 2017 07:19:11 +0900 Subject: [PATCH 017/152] Bug 1374940 - Cleanup how toolchain names are derived from task labels. 
r=dustin We don't need to hardcode that they are prefixed with "toolchain-", when they, in fact, are prefixed with the name of the kind being handled. Also, we used to need to remove a /opt suffix, but bug 1345863 removed those suffixes, so we don't need to remove it anymore. --HG-- extra : rebase_source : 3d73fa06c2bccd71ed1caa1f1ba991bc83f75f5b --- taskcluster/taskgraph/transforms/job/toolchain.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/taskcluster/taskgraph/transforms/job/toolchain.py b/taskcluster/taskgraph/transforms/job/toolchain.py index 0f3d71d007798..4bd1f1b8b9888 100644 --- a/taskcluster/taskgraph/transforms/job/toolchain.py +++ b/taskcluster/taskgraph/transforms/job/toolchain.py @@ -51,7 +51,7 @@ def add_optimizations(config, run, taskdesc): label = taskdesc['label'] subs = { - 'name': label.replace('toolchain-', '').split('/')[0], + 'name': label.replace('%s-' % config.kind, ''), 'digest': hash_paths(GECKO, files), } From dfdf3bda5787c74af9cac16cae85ee462829d05e Mon Sep 17 00:00:00 2001 From: Mike Hommey Date: Wed, 19 Jul 2017 07:25:26 +0900 Subject: [PATCH 018/152] Bug 1374940 - Allow transforms to access info about tasks from kind dependencies of the current kind. 
r=dustin --HG-- extra : rebase_source : e5da20afbf84480366cdc197fd25f12237f99346 --- taskcluster/taskgraph/generator.py | 7 ++++++- taskcluster/taskgraph/transforms/base.py | 7 ++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/taskcluster/taskgraph/generator.py b/taskcluster/taskgraph/generator.py index 0535e016f3e9c..5a67e2559a68f 100644 --- a/taskcluster/taskgraph/generator.py +++ b/taskcluster/taskgraph/generator.py @@ -49,6 +49,10 @@ def load_tasks(self, parameters, loaded_tasks): else: config['args'] = None + kind_dependencies = config.get('kind-dependencies', []) + kind_dependencies_tasks = [task for task in loaded_tasks + if task.kind in kind_dependencies] + inputs = loader(self.name, self.path, config, parameters, loaded_tasks) transforms = TransformSequence() @@ -57,7 +61,8 @@ def load_tasks(self, parameters, loaded_tasks): transforms.add(transform) # perform the transformations on the loaded inputs - trans_config = TransformConfig(self.name, self.path, config, parameters) + trans_config = TransformConfig(self.name, self.path, config, parameters, + kind_dependencies_tasks) tasks = [Task(self.name, label=task_dict['label'], attributes=task_dict['attributes'], diff --git a/taskcluster/taskgraph/transforms/base.py b/taskcluster/taskgraph/transforms/base.py index d65f8fa3e976e..53efaf12df634 100644 --- a/taskcluster/taskgraph/transforms/base.py +++ b/taskcluster/taskgraph/transforms/base.py @@ -9,7 +9,8 @@ class TransformConfig(object): """A container for configuration affecting transforms. 
The `config` argument to transforms is an instance of this class, possibly with additional kind-specific attributes beyond those set here.""" - def __init__(self, kind, path, config, params): + def __init__(self, kind, path, config, params, + kind_dependencies_tasks=None): # the name of the current kind self.kind = kind @@ -22,6 +23,10 @@ def __init__(self, kind, path, config, params): # the parameters for this task-graph generation run self.params = params + # a list of all the tasks associated with the kind dependencies of the + # current kind + self.kind_dependencies_tasks = kind_dependencies_tasks + class TransformSequence(object): """ From 28e5237a015e479a1ac4809352a2da85095c214e Mon Sep 17 00:00:00 2001 From: Mike Hommey Date: Wed, 21 Jun 2017 15:13:35 +0900 Subject: [PATCH 019/152] Bug 1374940 - Add artifact paths to toolchain jobs definitions. r=dustin The toolchain jobs produce artifacts that are going to be used by other jobs, but there is no reliable way for the decision task to know the name of those artifacts. So we make their definition required in the toolchain job definitions. 
--HG-- extra : rebase_source : eb668cf2bf7793b44dc3d91e3ddb8d7c69e3de52 --- taskcluster/ci/toolchain/linux.yml | 7 +++++++ taskcluster/ci/toolchain/macosx.yml | 3 +++ taskcluster/ci/toolchain/windows.yml | 4 ++++ taskcluster/docs/attributes.rst | 6 +++++- taskcluster/taskgraph/transforms/job/toolchain.py | 9 +++++++++ taskcluster/taskgraph/transforms/task.py | 1 - 6 files changed, 28 insertions(+), 2 deletions(-) diff --git a/taskcluster/ci/toolchain/linux.yml b/taskcluster/ci/toolchain/linux.yml index 0efaf71a1e155..1b30d74342ab5 100644 --- a/taskcluster/ci/toolchain/linux.yml +++ b/taskcluster/ci/toolchain/linux.yml @@ -21,6 +21,7 @@ linux64-clang: resources: - 'build/build-clang/**' - 'taskcluster/scripts/misc/tooltool-download.sh' + toolchain-artifact: public/clang.tar.xz linux64-clang-tidy: description: "Clang-tidy build" @@ -45,6 +46,7 @@ linux64-clang-tidy: - 'build/clang-plugin/**' - 'build/build-clang/**' - 'taskcluster/scripts/misc/tooltool-download.sh' + toolchain-artifact: public/clang-tidy.tar.xz linux64-gcc: description: "GCC toolchain build" @@ -62,6 +64,7 @@ linux64-gcc: script: build-gcc-linux.sh resources: - 'build/unix/build-gcc/**' + toolchain-artifact: public/gcc.tar.xz linux64-binutils: description: "Binutils toolchain build" @@ -79,6 +82,7 @@ linux64-binutils: script: build-binutils-linux.sh resources: - 'build/unix/build-binutils/**' + toolchain-artifact: public/binutils.tar.xz linux64-cctools-port: description: "cctools-port toolchain build" @@ -98,6 +102,7 @@ linux64-cctools-port: script: build-cctools-port.sh resources: - 'taskcluster/scripts/misc/tooltool-download.sh' + toolchain-artifact: public/cctools.tar.xz linux64-hfsplus: description: "hfsplus toolchain build" @@ -118,6 +123,7 @@ linux64-hfsplus: resources: - 'build/unix/build-hfsplus/**' - 'taskcluster/scripts/misc/tooltool-download.sh' + toolchain-artifact: public/hfsplus-tools.tar.xz linux64-libdmg: description: "libdmg-hfsplus toolchain build" @@ -133,3 +139,4 @@ 
linux64-libdmg: run: using: toolchain-script script: build-libdmg-hfsplus.sh + toolchain-artifact: public/dmg.tar.xz diff --git a/taskcluster/ci/toolchain/macosx.yml b/taskcluster/ci/toolchain/macosx.yml index c50a7c1c06464..63caa01e758db 100644 --- a/taskcluster/ci/toolchain/macosx.yml +++ b/taskcluster/ci/toolchain/macosx.yml @@ -22,6 +22,7 @@ macosx64-clang: resources: - 'build/build-clang/**' - 'taskcluster/scripts/misc/tooltool-download.sh' + toolchain-artifact: public/clang.tar.xz macosx64-clang-tidy: description: "Clang-tidy build" @@ -47,6 +48,7 @@ macosx64-clang-tidy: - 'build/clang-plugin/**' - 'build/build-clang/**' - 'taskcluster/scripts/misc/tooltool-download.sh' + toolchain-artifact: public/clang-tidy.tar.xz macosx64-cctools-port: description: "cctools-port toolchain build" @@ -67,3 +69,4 @@ macosx64-cctools-port: tooltool-downloads: internal resources: - 'taskcluster/scripts/misc/tooltool-download.sh' + toolchain-artifact: public/cctools.tar.bz2 diff --git a/taskcluster/ci/toolchain/windows.yml b/taskcluster/ci/toolchain/windows.yml index cd947f17ab55b..f0f7f6a261181 100644 --- a/taskcluster/ci/toolchain/windows.yml +++ b/taskcluster/ci/toolchain/windows.yml @@ -20,6 +20,7 @@ win32-clang-cl: resources: - 'build/build-clang/**' - 'taskcluster/scripts/misc/build-clang-windows-helper32.sh' + toolchain-artifact: public/build/clang.tar.bz2 win64-clang-cl: description: "Clang-cl toolchain build" @@ -39,6 +40,7 @@ win64-clang-cl: resources: - 'build/build-clang/**' - 'taskcluster/scripts/misc/build-clang-windows-helper64.sh' + toolchain-artifact: public/build/clang.tar.bz2 win32-clang-tidy: description: "Clang-tidy toolchain build" @@ -61,6 +63,7 @@ win32-clang-tidy: resources: - 'build/build-clang/**' - 'taskcluster/scripts/misc/build-clang-windows-helper32.sh' + toolchain-artifact: public/build/clang-tidy.tar.bz2 win64-clang-tidy: description: "Clang-tidy toolchain build" @@ -83,3 +86,4 @@ win64-clang-tidy: resources: - 'build/build-clang/**' - 
'taskcluster/scripts/misc/build-clang-windows-helper64.sh' + toolchain-artifact: public/build/clang-tidy.tar.bz2 diff --git a/taskcluster/docs/attributes.rst b/taskcluster/docs/attributes.rst index bc02f102ecbdc..c36cb47c6b2f7 100644 --- a/taskcluster/docs/attributes.rst +++ b/taskcluster/docs/attributes.rst @@ -164,4 +164,8 @@ Signals that the output of this task contains signed artifacts. repackage_type ============== This is the type of repackage. Can be ``repackage`` or -``repacakge_signing``. +``repackage_signing``. + +toolchain-artifact +================== +For toolchain jobs, this is the path to the artifact for that toolchain. diff --git a/taskcluster/taskgraph/transforms/job/toolchain.py b/taskcluster/taskgraph/transforms/job/toolchain.py index 4bd1f1b8b9888..bc48c29ca2236 100644 --- a/taskcluster/taskgraph/transforms/job/toolchain.py +++ b/taskcluster/taskgraph/transforms/job/toolchain.py @@ -39,6 +39,9 @@ # Paths/patterns pointing to files that influence the outcome of a # toolchain build. 
Optional('resources'): [basestring], + + # Path to the artifact produced by the toolchain job + Required('toolchain-artifact'): basestring, }) @@ -130,6 +133,9 @@ def docker_worker_toolchain(config, job, taskdesc): run['script']) ] + attributes = taskdesc.setdefault('attributes', {}) + attributes['toolchain-artifact'] = run['toolchain-artifact'] + add_optimizations(config, run, taskdesc) @@ -181,4 +187,7 @@ def windows_toolchain(config, job, taskdesc): r'{} -c ./build/src/taskcluster/scripts/misc/{}'.format(bash, run['script']) ] + attributes = taskdesc.setdefault('attributes', {}) + attributes['toolchain-artifact'] = run['toolchain-artifact'] + add_optimizations(config, run, taskdesc) diff --git a/taskcluster/taskgraph/transforms/task.py b/taskcluster/taskgraph/transforms/task.py index 020336e02fde0..01400c4365752 100644 --- a/taskcluster/taskgraph/transforms/task.py +++ b/taskcluster/taskgraph/transforms/task.py @@ -215,7 +215,6 @@ # the exit status code that indicates the task should be retried Optional('retry-exit-status'): int, - }, { Required('implementation'): 'generic-worker', Required('os'): Any('windows', 'macosx'), From 780768bc06f0e81caf14aed2f55963b5f0b37089 Mon Sep 17 00:00:00 2001 From: Mike Hommey Date: Wed, 21 Jun 2017 15:38:21 +0900 Subject: [PATCH 020/152] Bug 1374940 - Allow to define a list of toolchains to use for a given TC job. r=dustin Such a definition automatically sets up the corresponding dependencies in the taskgraph, and adds the necessary artifact definitions for use in the corresponding jobs. The jobs end up with a MOZ_TOOLCHAINS environment variable with a list of path@task-id strings, where task-id is corresponding to the (possibly optimized) toolchain job, and path corresponding to the toolchain-artifact defined for that toolchain job. 
--HG-- extra : rebase_source : b2d297bd75d9c416b30d2a6c6d61efcb64681727 --- taskcluster/ci/android-stuff/kind.yml | 4 + taskcluster/ci/artifact-build/kind.yml | 4 + taskcluster/ci/build/kind.yml | 4 + taskcluster/ci/hazard/kind.yml | 4 + taskcluster/ci/spidermonkey/kind.yml | 4 + taskcluster/ci/static-analysis/kind.yml | 4 + taskcluster/ci/toolchain/kind.yml | 1 + taskcluster/ci/valgrind/kind.yml | 4 + taskcluster/taskgraph/transforms/toolchain.py | 76 +++++++++++++++++++ 9 files changed, 105 insertions(+) create mode 100644 taskcluster/taskgraph/transforms/toolchain.py diff --git a/taskcluster/ci/android-stuff/kind.yml b/taskcluster/ci/android-stuff/kind.yml index 8a35329bae7bb..eb1e9ac37929b 100644 --- a/taskcluster/ci/android-stuff/kind.yml +++ b/taskcluster/ci/android-stuff/kind.yml @@ -8,7 +8,11 @@ loader: taskgraph.loader.transform:loader +kind-dependencies: + - toolchain + transforms: + - taskgraph.transforms.toolchain:transforms - taskgraph.transforms.try_job:transforms - taskgraph.transforms.android_stuff:transforms - taskgraph.transforms.task:transforms diff --git a/taskcluster/ci/artifact-build/kind.yml b/taskcluster/ci/artifact-build/kind.yml index a105b5a15185a..5d6caf28fac21 100644 --- a/taskcluster/ci/artifact-build/kind.yml +++ b/taskcluster/ci/artifact-build/kind.yml @@ -4,8 +4,12 @@ loader: taskgraph.loader.transform:loader +kind-dependencies: + - toolchain + transforms: - taskgraph.transforms.build_attrs:transforms + - taskgraph.transforms.toolchain:transforms - taskgraph.transforms.job:transforms - taskgraph.transforms.task:transforms diff --git a/taskcluster/ci/build/kind.yml b/taskcluster/ci/build/kind.yml index 7bdfd65897b5b..dd10928f87b6e 100644 --- a/taskcluster/ci/build/kind.yml +++ b/taskcluster/ci/build/kind.yml @@ -4,10 +4,14 @@ loader: taskgraph.loader.transform:loader +kind-dependencies: + - toolchain + transforms: - taskgraph.transforms.build:transforms - taskgraph.transforms.build_attrs:transforms - 
taskgraph.transforms.build_lints:transforms + - taskgraph.transforms.toolchain:transforms - taskgraph.transforms.job:transforms - taskgraph.transforms.task:transforms diff --git a/taskcluster/ci/hazard/kind.yml b/taskcluster/ci/hazard/kind.yml index 2f61398cebd30..152534f36583d 100644 --- a/taskcluster/ci/hazard/kind.yml +++ b/taskcluster/ci/hazard/kind.yml @@ -4,8 +4,12 @@ loader: taskgraph.loader.transform:loader +kind-dependencies: + - toolchain + transforms: - taskgraph.transforms.build_attrs:transforms + - taskgraph.transforms.toolchain:transforms - taskgraph.transforms.job:transforms - taskgraph.transforms.task:transforms diff --git a/taskcluster/ci/spidermonkey/kind.yml b/taskcluster/ci/spidermonkey/kind.yml index d742475768207..d0e0b7a467bf4 100644 --- a/taskcluster/ci/spidermonkey/kind.yml +++ b/taskcluster/ci/spidermonkey/kind.yml @@ -4,8 +4,12 @@ loader: taskgraph.loader.transform:loader +kind-dependencies: + - toolchain + transforms: - taskgraph.transforms.build_attrs:transforms + - taskgraph.transforms.toolchain:transforms - taskgraph.transforms.job:transforms - taskgraph.transforms.task:transforms diff --git a/taskcluster/ci/static-analysis/kind.yml b/taskcluster/ci/static-analysis/kind.yml index fc0d953e7a724..b7e0eac753f0a 100644 --- a/taskcluster/ci/static-analysis/kind.yml +++ b/taskcluster/ci/static-analysis/kind.yml @@ -4,8 +4,12 @@ loader: taskgraph.loader.transform:loader +kind-dependencies: + - toolchain + transforms: - taskgraph.transforms.build_attrs:transforms + - taskgraph.transforms.toolchain:transforms - taskgraph.transforms.job:transforms - taskgraph.transforms.task:transforms diff --git a/taskcluster/ci/toolchain/kind.yml b/taskcluster/ci/toolchain/kind.yml index 4adcada199e26..23f5290fb6fe0 100644 --- a/taskcluster/ci/toolchain/kind.yml +++ b/taskcluster/ci/toolchain/kind.yml @@ -6,6 +6,7 @@ loader: taskgraph.loader.transform:loader transforms: - taskgraph.transforms.try_job:transforms + - taskgraph.transforms.toolchain:transforms - 
taskgraph.transforms.job:transforms - taskgraph.transforms.task:transforms diff --git a/taskcluster/ci/valgrind/kind.yml b/taskcluster/ci/valgrind/kind.yml index 40c357fae4ef8..1d3d846bc6d88 100644 --- a/taskcluster/ci/valgrind/kind.yml +++ b/taskcluster/ci/valgrind/kind.yml @@ -4,8 +4,12 @@ loader: taskgraph.loader.transform:loader +kind-dependencies: + - toolchain + transforms: - taskgraph.transforms.build_attrs:transforms + - taskgraph.transforms.toolchain:transforms - taskgraph.transforms.job:transforms - taskgraph.transforms.task:transforms diff --git a/taskcluster/taskgraph/transforms/toolchain.py b/taskcluster/taskgraph/transforms/toolchain.py new file mode 100644 index 0000000000000..c76d95d80e1be --- /dev/null +++ b/taskcluster/taskgraph/transforms/toolchain.py @@ -0,0 +1,76 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +from __future__ import absolute_import, print_function, unicode_literals + +import os +from taskgraph.transforms.base import TransformSequence + + +transforms = TransformSequence() + + +@transforms.add +def use_toolchains(config, jobs): + """Add dependencies corresponding to toolchains to use, and pass a list + of corresponding artifacts to jobs using toolchains. + """ + artifacts = {} + # Toolchain jobs can depend on other toolchain jobs, but we don't have full + # tasks for them, since they're being transformed. So scan the jobs list in + # that case, otherwise, use the list of tasks for the kind dependencies. 
+ if config.kind == 'toolchain': + jobs = list(jobs) + for job in jobs: + artifact = job.get('run', {}).get('toolchain-artifact') + if artifact: + artifacts[job['name']] = artifact + else: + for task in config.kind_dependencies_tasks: + if task.kind != 'toolchain': + continue + artifact = task.attributes.get('toolchain-artifact') + if artifact: + artifacts[task.label.replace('%s-' % task.kind, '')] = artifact + + for job in jobs: + env = job.setdefault('worker', {}).setdefault('env', {}) + + toolchains = job.pop('toolchains', []) + + if config.kind == 'toolchain' and job['name'] in toolchains: + raise Exception("Toolchain job %s can't use itself as toolchain" + % job['name']) + + filenames = {} + for t in toolchains: + if t not in artifacts: + raise Exception('Missing toolchain job for %s-%s: %s' + % (config.kind, job['name'], t)) + + f = os.path.basename(artifacts[t]) + if f in filenames: + # Build jobs don't support toolchain artifacts with the same + # name: they would overwrite one with the other. + raise Exception('%s-%s cannot use both %s and %s toolchains: ' + 'they both have the same artifact name %s' + % (config.kind, job['name'], filenames[f], + t, f)) + filenames[f] = t + + if toolchains: + job.setdefault('dependencies', {}).update( + ('toolchain-%s' % t, 'toolchain-%s' % t) + for t in toolchains + ) + # Pass a list of artifact-path@task-id to the job for all the + # toolchain artifacts it's going to need, where task-id is + # corresponding to the (possibly optimized) toolchain job, and + # artifact-path to the toolchain-artifact defined for that + # toolchain job. + env['MOZ_TOOLCHAINS'] = {'task-reference': ' '.join( + '%s@' % (artifacts[t], t) + for t in toolchains + )} + + yield job From 8b22bb293b3d453360d2a0b80b5a3a3d9abdeeb9 Mon Sep 17 00:00:00 2001 From: Michael Comella Date: Tue, 18 Jul 2017 13:56:21 -0700 Subject: [PATCH 021/152] Bug 1381984: Replace StringUtils.join with TextUtils.join. 
r=liuche I verified the behavior is the same by replacing StringUtils.join with TextUtils.join in the StringUtils.join test (which passed) before deleting it all. MozReview-Commit-ID: 1pmSdTuG0LU --HG-- extra : rebase_source : 3c20a5ceccaebd3d30a935b3ea20c87ace7d628f extra : source : 9cdc0fcca44abc8f2ec1ea8f853ca31f8d59ce10 --- .../org/mozilla/gecko/util/StringUtils.java | 19 ------------------- .../gecko/util/publicsuffix/PublicSuffix.java | 4 ++-- .../mozilla/gecko/util/TestStringUtils.java | 17 ----------------- 3 files changed, 2 insertions(+), 38 deletions(-) diff --git a/mobile/android/geckoview/src/main/java/org/mozilla/gecko/util/StringUtils.java b/mobile/android/geckoview/src/main/java/org/mozilla/gecko/util/StringUtils.java index ad8c5f80a1013..2704a13e2f5f2 100644 --- a/mobile/android/geckoview/src/main/java/org/mozilla/gecko/util/StringUtils.java +++ b/mobile/android/geckoview/src/main/java/org/mozilla/gecko/util/StringUtils.java @@ -276,23 +276,4 @@ public static String forceLTR(String text) { return "\u200E" + text; } - - /** - * Joining together a sequence of strings with a separator. 
- */ - public static String join(@NonNull String separator, @NonNull List parts) { - if (parts.size() == 0) { - return ""; - } - - final StringBuilder builder = new StringBuilder(); - builder.append(parts.get(0)); - - for (int i = 1; i < parts.size(); i++) { - builder.append(separator); - builder.append(parts.get(i)); - } - - return builder.toString(); - } } diff --git a/mobile/android/geckoview/src/main/java/org/mozilla/gecko/util/publicsuffix/PublicSuffix.java b/mobile/android/geckoview/src/main/java/org/mozilla/gecko/util/publicsuffix/PublicSuffix.java index 6a146cfcf75c3..be8ba0f601e09 100644 --- a/mobile/android/geckoview/src/main/java/org/mozilla/gecko/util/publicsuffix/PublicSuffix.java +++ b/mobile/android/geckoview/src/main/java/org/mozilla/gecko/util/publicsuffix/PublicSuffix.java @@ -8,7 +8,7 @@ import android.support.annotation.NonNull; import android.support.annotation.WorkerThread; -import org.mozilla.gecko.util.StringUtils; +import android.text.TextUtils; import java.util.ArrayList; import java.util.Collections; @@ -59,7 +59,7 @@ private static int findPublicSuffixIndex(Context context, String domain) { final Set exact = PublicSuffixPatterns.getExactSet(context); for (int i = 0; i < partsSize; i++) { - String ancestorName = StringUtils.join(".", parts.subList(i, partsSize)); + String ancestorName = TextUtils.join(".", parts.subList(i, partsSize)); if (exact.contains(ancestorName)) { return joinIndex(parts, i); diff --git a/mobile/android/tests/background/junit4/src/org/mozilla/gecko/util/TestStringUtils.java b/mobile/android/tests/background/junit4/src/org/mozilla/gecko/util/TestStringUtils.java index 6e551330b415b..24db04a13b945 100644 --- a/mobile/android/tests/background/junit4/src/org/mozilla/gecko/util/TestStringUtils.java +++ b/mobile/android/tests/background/junit4/src/org/mozilla/gecko/util/TestStringUtils.java @@ -103,23 +103,6 @@ public void testForceLTR() { assertEquals(5, forcedAgainLtrString.length()); } - @Test - public void testJoin() 
{ - assertEquals("", StringUtils.join("", Collections.emptyList())); - assertEquals("", StringUtils.join("-", Collections.emptyList())); - assertEquals("", StringUtils.join("", Collections.singletonList(""))); - assertEquals("", StringUtils.join(".", Collections.singletonList(""))); - - assertEquals("192.168.0.1", StringUtils.join(".", Arrays.asList("192", "168", "0", "1"))); - assertEquals("www.mozilla.org", StringUtils.join(".", Arrays.asList("www", "mozilla", "org"))); - - assertEquals("hello", StringUtils.join("", Collections.singletonList("hello"))); - assertEquals("helloworld", StringUtils.join("", Arrays.asList("hello", "world"))); - assertEquals("hello world", StringUtils.join(" ", Arrays.asList("hello", "world"))); - - assertEquals("m::o::z::i::l::l::a", StringUtils.join("::", Arrays.asList("m", "o", "z", "i", "l", "l", "a"))); - } - @Test public void testIsSearchQuery(){ boolean any = true; From 4c6aca3b5aecb6e71ba6e898ac9038d80f9e038c Mon Sep 17 00:00:00 2001 From: Mike Hommey Date: Thu, 20 Jul 2017 14:06:23 +0900 Subject: [PATCH 022/152] Bug 1382525 - Move LLVM_CONFIG out of mozconfig.common. r=chmanchester LLVM_CONFIG, per the contents of toolkit/moz.configure, is tied to --enable-stylo, but it currently is set on all types of builds. It currently happens to work, but it's actually not meant to, and sure enough, the fix for bug 1374727 exacerbates that. So we create a new mozconfig.stylo file that enables stylo and sets LLVM_CONFIG, such that only build types that do enable stylo have LLVM_CONFIG set. 
--HG-- extra : rebase_source : 01277a79951888046c0b8e29c61cfc3b049ee0f0 --- browser/config/mozconfigs/linux64/common-opt | 4 +--- browser/config/mozconfigs/linux64/debug | 3 +-- .../config/mozconfigs/linux64/debug-static-analysis-clang | 1 + browser/config/mozconfigs/linux64/opt-static-analysis-clang | 1 + browser/config/mozconfigs/linux64/valgrind | 1 + browser/config/mozconfigs/win32/common-opt | 4 +--- browser/config/mozconfigs/win32/debug | 4 +--- browser/config/mozconfigs/win64/common-opt | 4 +--- browser/config/mozconfigs/win64/debug | 4 +--- build/macosx/mozconfig.common | 4 +--- build/mozconfig.common | 3 --- build/mozconfig.no-compile | 1 + build/mozconfig.stylo | 6 ++++++ 13 files changed, 17 insertions(+), 23 deletions(-) create mode 100644 build/mozconfig.stylo diff --git a/browser/config/mozconfigs/linux64/common-opt b/browser/config/mozconfigs/linux64/common-opt index 94285d1d62b8a..2b810f2c20d0a 100644 --- a/browser/config/mozconfigs/linux64/common-opt +++ b/browser/config/mozconfigs/linux64/common-opt @@ -1,8 +1,6 @@ # This file is sourced by the nightly, beta, and release mozconfigs. -# TODO remove once configure defaults to stylo once stylo enabled -# on all platforms. -ac_add_options --enable-stylo=build +. $topsrcdir/build/mozconfig.stylo ac_add_options --enable-update-channel=${MOZ_UPDATE_CHANNEL} ac_add_options --with-google-api-keyfile=/builds/gapi.data diff --git a/browser/config/mozconfigs/linux64/debug b/browser/config/mozconfigs/linux64/debug index b68d173df1ae8..8914f1336b053 100644 --- a/browser/config/mozconfigs/linux64/debug +++ b/browser/config/mozconfigs/linux64/debug @@ -2,8 +2,7 @@ ac_add_options --enable-debug ac_add_options --enable-dmd ac_add_options --enable-verify-mar -# TODO remove once configure defaults to stylo once stylo enabled -ac_add_options --enable-stylo=build +. 
$topsrcdir/build/mozconfig.stylo MOZ_AUTOMATION_L10N_CHECK=0 diff --git a/browser/config/mozconfigs/linux64/debug-static-analysis-clang b/browser/config/mozconfigs/linux64/debug-static-analysis-clang index db96894be2b95..d8d0be69556f2 100644 --- a/browser/config/mozconfigs/linux64/debug-static-analysis-clang +++ b/browser/config/mozconfigs/linux64/debug-static-analysis-clang @@ -9,6 +9,7 @@ ac_add_options --enable-dmd # Disable stylo until bug 1356926 is fixed and we have >= llvm39 on centos. ac_add_options --disable-stylo +unset LLVM_CONFIG # Use Clang as specified in manifest export CC="$topsrcdir/clang/bin/clang" diff --git a/browser/config/mozconfigs/linux64/opt-static-analysis-clang b/browser/config/mozconfigs/linux64/opt-static-analysis-clang index d5929a065babd..4635ae84ee3ac 100644 --- a/browser/config/mozconfigs/linux64/opt-static-analysis-clang +++ b/browser/config/mozconfigs/linux64/opt-static-analysis-clang @@ -8,6 +8,7 @@ ac_add_options --enable-dmd # Disable stylo until bug 1356926 is fixed and we have >= llvm39 on centos. ac_add_options --disable-stylo +unset LLVM_CONFIG # Use Clang as specified in manifest CC="$topsrcdir/clang/bin/clang" diff --git a/browser/config/mozconfigs/linux64/valgrind b/browser/config/mozconfigs/linux64/valgrind index 1bb310a98df6f..efcc51d3aec60 100644 --- a/browser/config/mozconfigs/linux64/valgrind +++ b/browser/config/mozconfigs/linux64/valgrind @@ -8,6 +8,7 @@ ac_add_options --disable-gtest-in-build # Rust code gives false positives that we have not entirely suppressed yet. # Bug 1365915 tracks fixing these. ac_add_options --disable-stylo +unset LLVM_CONFIG # Include the override mozconfig again (even though the above includes it) # since it's supposed to override everything. 
diff --git a/browser/config/mozconfigs/win32/common-opt b/browser/config/mozconfigs/win32/common-opt index ad10763e19372..cec7b0b6e75a5 100644 --- a/browser/config/mozconfigs/win32/common-opt +++ b/browser/config/mozconfigs/win32/common-opt @@ -1,8 +1,6 @@ # This file is sourced by the nightly, beta, and release mozconfigs. -# TODO remove once configure defaults to stylo once stylo enabled -# on all platforms. -ac_add_options --enable-stylo=build +. "$topsrcdir/build/mozconfig.stylo" . "$topsrcdir/browser/config/mozconfigs/common" diff --git a/browser/config/mozconfigs/win32/debug b/browser/config/mozconfigs/win32/debug index 3fea63d96634f..e4f88d2b0941b 100644 --- a/browser/config/mozconfigs/win32/debug +++ b/browser/config/mozconfigs/win32/debug @@ -2,9 +2,7 @@ MOZ_AUTOMATION_L10N_CHECK=0 . "$topsrcdir/browser/config/mozconfigs/common" -# TODO remove once configure defaults to stylo once stylo enabled -# on all platforms. -ac_add_options --enable-stylo=build +. "$topsrcdir/build/mozconfig.stylo" ac_add_options --enable-debug ac_add_options --enable-dmd diff --git a/browser/config/mozconfigs/win64/common-opt b/browser/config/mozconfigs/win64/common-opt index 305fd974bb329..4cd860705eda0 100644 --- a/browser/config/mozconfigs/win64/common-opt +++ b/browser/config/mozconfigs/win64/common-opt @@ -1,8 +1,6 @@ # This file is sourced by the nightly, beta, and release mozconfigs. -# TODO remove once configure defaults to stylo once stylo enabled -# on all platforms. -ac_add_options --enable-stylo=build +. "$topsrcdir/build/mozconfig.stylo" . 
"$topsrcdir/browser/config/mozconfigs/common" diff --git a/browser/config/mozconfigs/win64/debug b/browser/config/mozconfigs/win64/debug index fe5d8404d1165..96a6a2211a808 100644 --- a/browser/config/mozconfigs/win64/debug +++ b/browser/config/mozconfigs/win64/debug @@ -5,9 +5,7 @@ MOZ_AUTOMATION_L10N_CHECK=0 ac_add_options --target=x86_64-pc-mingw32 ac_add_options --host=x86_64-pc-mingw32 -# TODO remove once configure defaults to stylo once stylo enabled -# on all platforms. -ac_add_options --enable-stylo=build +. "$topsrcdir/build/mozconfig.stylo" ac_add_options --enable-debug ac_add_options --enable-dmd diff --git a/build/macosx/mozconfig.common b/build/macosx/mozconfig.common index a459475a7be67..487981fa1740a 100644 --- a/build/macosx/mozconfig.common +++ b/build/macosx/mozconfig.common @@ -4,6 +4,4 @@ else . $topsrcdir/build/macosx/local-mozconfig.common fi -# Enable stylo in automation builds. -# Can be removed after bug 1375774 is resolved. -ac_add_options --enable-stylo=build +. $topsrcdir/build/mozconfig.stylo diff --git a/build/mozconfig.common b/build/mozconfig.common index 24266efbebe55..b9a4b5e941769 100644 --- a/build/mozconfig.common +++ b/build/mozconfig.common @@ -14,9 +14,6 @@ mk_add_options AUTOCLOBBER=1 ac_add_options --enable-crashreporter -# Tell the build system where to find llvm-config for builds on automation. 
-export LLVM_CONFIG="${TOOLTOOL_DIR:-$topsrcdir}/clang/bin/llvm-config" - # Enable checking that add-ons are signed by the trusted root MOZ_ADDON_SIGNING=${MOZ_ADDON_SIGNING-1} # Disable enforcing that add-ons are signed by the trusted root diff --git a/build/mozconfig.no-compile b/build/mozconfig.no-compile index 2d90451422c14..65628df01a6db 100644 --- a/build/mozconfig.no-compile +++ b/build/mozconfig.no-compile @@ -10,3 +10,4 @@ unset CARGO unset MAKECAB unset TOOLCHAIN_PREFIX unset BINDGEN_CFLAGS +unset LLVM_CONFIG diff --git a/build/mozconfig.stylo b/build/mozconfig.stylo new file mode 100644 index 0000000000000..368cf33c263e1 --- /dev/null +++ b/build/mozconfig.stylo @@ -0,0 +1,6 @@ +# Tell the build system where to find llvm-config for builds on automation. +export LLVM_CONFIG="${TOOLTOOL_DIR:-$topsrcdir}/clang/bin/llvm-config" + +# TODO remove once configure defaults to stylo once stylo enabled +# on all platforms. +ac_add_options --enable-stylo=build From 0cdaafffc56bf475a0bfa827b18dc1bddb9e1148 Mon Sep 17 00:00:00 2001 From: Mike Hommey Date: Wed, 21 Jun 2017 07:19:33 +0900 Subject: [PATCH 023/152] Bug 1374727 - Apply check_prog's `when` to more of what it "expands" to. r=chmanchester check_prog, when used with a `when` argument, doesn't work the same way as putting it under a `with only_when()` block, while it should. The difference comes from the fact that `with only_when()` applies the `when` to every option and depends used in the block (which check_prog calls a bunch of). So, we "manually" apply the `when` to all option and depends in check_prog. An alternative solution would be to put the whole function under a `with only_when()` block, but that would mean reindenting the whole function. Either way, as a consequence, this requires the `when` to have a dependency on --help for "non-trivial" functions, which fortunately, there's only one of. 
--HG-- extra : rebase_source : d91eca9e303c7062394d92a526983714ef3e612f --- build/moz.configure/checks.configure | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/build/moz.configure/checks.configure b/build/moz.configure/checks.configure index 88c70572ecd5e..7e5c237fd6825 100644 --- a/build/moz.configure/checks.configure +++ b/build/moz.configure/checks.configure @@ -97,7 +97,7 @@ def check_prog(var, progs, what=None, input=None, allow_missing=False, paths=None, when=None): if input is not None: # Wrap input with type checking and normalization. - @depends(input) + @depends(input, when=when) def input(value): if not value: return @@ -108,7 +108,7 @@ def check_prog(var, progs, what=None, input=None, allow_missing=False, configure_error('input must resolve to a tuple or a list with a ' 'single element, or a string') else: - option(env=var, nargs=1, + option(env=var, nargs=1, when=when, help='Path to %s' % (what or 'the %s program' % var.lower())) input = var what = what or var.lower() @@ -135,7 +135,7 @@ def check_prog(var, progs, what=None, input=None, allow_missing=False, if not allow_missing or value: raise FatalCheckError('Cannot find %s' % what) - @depends_if(check, progs) + @depends_if(check, progs, when=when) def normalized_for_config(value, progs): return ':' if value is None else value From f4917a761165270ca3fbb4d7d7b794533bb6323a Mon Sep 17 00:00:00 2001 From: Manish Goregaokar Date: Thu, 20 Jul 2017 14:15:00 -0700 Subject: [PATCH 024/152] Bug 1382190: Make fields public to avoid assertions; r=bustage MozReview-Commit-ID: 4t4RfnlmFy0 --- layout/style/ServoTypes.h | 1 - 1 file changed, 1 deletion(-) diff --git a/layout/style/ServoTypes.h b/layout/style/ServoTypes.h index a95a8231e04a7..3137399eda6a4 100644 --- a/layout/style/ServoTypes.h +++ b/layout/style/ServoTypes.h @@ -157,7 +157,6 @@ enum ServoKeywordSize { // have the same size and alignment as the Rust version. // Ensure layout tests get run if touching either side. 
struct ServoFontComputationData { -private: ServoKeywordSize mKeyword; float/*32_t*/ mRatio; From 78793f2a1e7f7eb86e62ef9d9869a25fdfd0255b Mon Sep 17 00:00:00 2001 From: Xidorn Quan Date: Thu, 20 Jul 2017 13:08:58 -0700 Subject: [PATCH 025/152] servo: Merge #17792 - Fix supports rule parsing issues with (from upsuper:supports-any-value); r=SimonSapin This eventually fixes #15482, as well as several reftests in mozilla-central which were added for [bug 883987](https://bugzilla.mozilla.org/show_bug.cgi?id=883987). The new function should probably be moved into cssparser crate at some point. Source-Repo: https://github.com/servo/servo Source-Revision: e19fefcb474ea6593a684a1ca4ce616e61188ff0 --HG-- extra : subtree_source : https%3A//hg.mozilla.org/projects/converted-servo-linear extra : subtree_revision : 8a2ae3ea22150665ff141aa141aedf8074fe672a --- .../style/stylesheets/supports_rule.rs | 41 ++++++++----------- servo/tests/unit/style/parsing/supports.rs | 2 + 2 files changed, 18 insertions(+), 25 deletions(-) diff --git a/servo/components/style/stylesheets/supports_rule.rs b/servo/components/style/stylesheets/supports_rule.rs index ff856bacf3ea7..5636a55cadc00 100644 --- a/servo/components/style/stylesheets/supports_rule.rs +++ b/servo/components/style/stylesheets/supports_rule.rs @@ -128,21 +128,18 @@ impl SupportsCondition { let pos = input.position(); match input.next()? { Token::ParenthesisBlock => { - input.parse_nested_block(|input| { - // `input.try()` not needed here since the alternative uses `consume_all()`. 
- parse_condition_or_declaration(input).or_else(|_| { - consume_all(input); - Ok(SupportsCondition::FutureSyntax(input.slice_from(pos).to_owned())) - }) - }) - } - Token::Function(_) => { - let result: Result<_, ParseError> = input.parse_nested_block(|i| Ok(consume_all(i))); - result.unwrap(); - Ok(SupportsCondition::FutureSyntax(input.slice_from(pos).to_owned())) + let nested = input.try(|input| { + input.parse_nested_block(|i| parse_condition_or_declaration(i)) + }); + if nested.is_ok() { + return nested; + } } - t => Err(CssParseError::Basic(BasicParseError::UnexpectedToken(t))) + Token::Function(_) => {} + t => return Err(CssParseError::Basic(BasicParseError::UnexpectedToken(t))), } + input.parse_nested_block(|i| consume_any_value(i))?; + Ok(SupportsCondition::FutureSyntax(input.slice_from(pos).to_owned())) } /// Evaluate a supports condition @@ -235,16 +232,9 @@ impl ToCss for Declaration { } } -/// Slurps up input till exhausted, return string from source position -fn parse_anything(input: &mut Parser) -> String { - let pos = input.position(); - consume_all(input); - input.slice_from(pos).to_owned() -} - -/// Consume input till done -fn consume_all(input: &mut Parser) { - while let Ok(_) = input.next() {} +/// https://drafts.csswg.org/css-syntax-3/#typedef-any-value +fn consume_any_value<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(), ParseError<'i>> { + input.expect_no_error_token().map_err(|err| err.into()) } impl Declaration { @@ -252,8 +242,9 @@ impl Declaration { pub fn parse<'i, 't>(input: &mut Parser<'i, 't>) -> Result> { let prop = input.expect_ident()?.into_owned(); input.expect_colon()?; - let val = parse_anything(input); - Ok(Declaration { prop: prop, val: val }) + let pos = input.position(); + consume_any_value(input)?; + Ok(Declaration { prop: prop, val: input.slice_from(pos).to_owned() }) } /// Determine if a declaration parses diff --git a/servo/tests/unit/style/parsing/supports.rs b/servo/tests/unit/style/parsing/supports.rs index 
bd7670ed17047..573be52d76777 100644 --- a/servo/tests/unit/style/parsing/supports.rs +++ b/servo/tests/unit/style/parsing/supports.rs @@ -11,4 +11,6 @@ fn test_supports_condition() { assert_roundtrip!(SupportsCondition::parse, "(margin: 1px)"); assert_roundtrip!(SupportsCondition::parse, "not (--be: to be)"); assert_roundtrip!(SupportsCondition::parse, "(color: blue) and future-extension(4)"); + assert_roundtrip!(SupportsCondition::parse, "future-\\1 extension(4)"); + assert_roundtrip!(SupportsCondition::parse, "((test))"); } From 499d2ea19cc88012a9bc9694259db6eee109a8b3 Mon Sep 17 00:00:00 2001 From: Gregory Szorc Date: Thu, 20 Jul 2017 08:24:36 -0700 Subject: [PATCH 026/152] Bug 1381741 - Only fail gold detection when --enable-gold is specified; r=glandium developer_options (!--enable-release) implies to search for and prefer the gold linker. As part of porting gold detection to moz.configure in d0e782180741 (bug 1351109), the logic inadvertently changed to require gold when either --enable-gold or !--enable-release were present. This commit relaxes the requirement to find gold when only --enable-gold is true. 
MozReview-Commit-ID: HTBicWNUkvy --HG-- extra : rebase_source : dd3938a7914f5db6c315fb775e7cc5ea177bf600 --- build/moz.configure/toolchain.configure | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/build/moz.configure/toolchain.configure b/build/moz.configure/toolchain.configure index fc08ea4b3b8f8..b2486abbd58a6 100644 --- a/build/moz.configure/toolchain.configure +++ b/build/moz.configure/toolchain.configure @@ -1105,18 +1105,19 @@ def enable_gold(enable_gold_option, c_compiler, developer_options, build_env): # Used to check the kind of linker version_check = ['-Wl,--version'] cmd_base = c_compiler.wrapper + [c_compiler.compiler] + c_compiler.flags - if enable_gold_option or developer_options: + + def resolve_gold(): # Try to force the usage of gold targetDir = os.path.join(build_env.topobjdir, 'build', 'unix', 'gold') gold_detection_arg = '-print-prog-name=ld.gold' gold = check_cmd_output(c_compiler.compiler, gold_detection_arg).strip() if not gold: - die('Could not find gold') + return goldFullPath = find_program(gold) if goldFullPath is None: - die('Could not find gold') + return if os.path.exists(targetDir): shutil.rmtree(targetDir) @@ -1135,6 +1136,16 @@ def enable_gold(enable_gold_option, c_compiler, developer_options, build_env): # The -B trick didn't work, removing the directory shutil.rmtree(targetDir) + if enable_gold_option or developer_options: + result = resolve_gold() + + if result: + return result + # gold is only required if --enable-gold is used. + elif enable_gold_option: + die('Could not find gold') + # Else fallthrough. 
+ cmd = cmd_base + version_check cmd_output = check_cmd_output(*cmd).decode('utf-8') # using decode because ld can be localized and python will From bc65b799ffd31de0a9f953e797873fe8fe60f902 Mon Sep 17 00:00:00 2001 From: Wes Kocher Date: Thu, 20 Jul 2017 14:35:53 -0700 Subject: [PATCH 027/152] Bug 1382525 - Followup to update a few more mozconfigs a=bustage MozReview-Commit-ID: KqAjwoj9wSX --HG-- extra : rebase_source : aa313e4e8bcbd24f45f4a335904a9eb905b2bf50 --- browser/config/mozconfigs/linux64/nightly-asan | 4 +--- browser/config/mozconfigs/linux64/nightly-fuzzing-asan | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/browser/config/mozconfigs/linux64/nightly-asan b/browser/config/mozconfigs/linux64/nightly-asan index 9975321bab83e..97801c62cbb15 100644 --- a/browser/config/mozconfigs/linux64/nightly-asan +++ b/browser/config/mozconfigs/linux64/nightly-asan @@ -2,9 +2,7 @@ ac_add_options --disable-debug ac_add_options --enable-optimize="-O2 -gline-tables-only" -# TODO remove once configure defaults to stylo once stylo enabled -# on all platforms. -ac_add_options --enable-stylo=build +. $topsrcdir/build/mozconfig.stylo # ASan specific options on Linux ac_add_options --enable-valgrind diff --git a/browser/config/mozconfigs/linux64/nightly-fuzzing-asan b/browser/config/mozconfigs/linux64/nightly-fuzzing-asan index 4eca7a05d06d8..8ac5df1ddfdfa 100644 --- a/browser/config/mozconfigs/linux64/nightly-fuzzing-asan +++ b/browser/config/mozconfigs/linux64/nightly-fuzzing-asan @@ -2,9 +2,7 @@ ac_add_options --disable-debug ac_add_options --enable-optimize="-O2 -gline-tables-only" -# TODO remove once configure defaults to stylo once stylo enabled -# on all platforms. -ac_add_options --enable-stylo=build +. 
$topsrcdir/build/mozconfig.stylo # ASan specific options on Linux ac_add_options --enable-valgrind From a83c0b81cd909f51f89f8521424c5f5d27dc4589 Mon Sep 17 00:00:00 2001 From: Gregory Szorc Date: Tue, 18 Jul 2017 17:39:27 -0700 Subject: [PATCH 028/152] Bug 1377216 - Unset MOZ_AUTOMATION when building SpiderMonkey from package; r=glandium We're about to make MOZ_AUTOMATION more strict about things like having a source checkout. The whole point of build-sm-package.sh is to verify that SpiderMonkey can be built outside of Mozilla's source repo and automation from a standalone package. Since the presence of MOZ_AUTOMATION can influence so much behavior in the build system, unset it so that the job tests a !Mozilla environment more accurately. MozReview-Commit-ID: EMfyLKfY0uU --HG-- extra : rebase_source : 3632a9abf9fac3f916ed9043f30d4b6aa4abb390 --- taskcluster/scripts/builder/build-sm-package.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/taskcluster/scripts/builder/build-sm-package.sh b/taskcluster/scripts/builder/build-sm-package.sh index 6bb819f26663c..2a17c428dc021 100755 --- a/taskcluster/scripts/builder/build-sm-package.sh +++ b/taskcluster/scripts/builder/build-sm-package.sh @@ -21,6 +21,11 @@ tar -xjvf $UPLOAD_DIR/mozjs-*.tar.bz2 # Build the freshly extracted, packaged SpiderMonkey. pushd ./mozjs-*/js/src + +# MOZ_AUTOMATION enforces certain requirements that don't apply to +# packaged builds. Unset it. +unset MOZ_AUTOMATION + AUTOMATION=1 $PYTHON ./devtools/automation/autospider.py --skip-tests=checks $SPIDERMONKEY_VARIANT popd From b47fb974f14d3f209601cf04723ae27ec2f62956 Mon Sep 17 00:00:00 2001 From: Gregory Szorc Date: Tue, 18 Jul 2017 18:06:03 -0700 Subject: [PATCH 029/152] Bug 1377216 - Accept environment variables to check_cmd_output; r=glandium And include code to work around a bug on older Python versions. 
MozReview-Commit-ID: 4pBnMQQJOGB --HG-- extra : rebase_source : 6f7c5784230bd37b3496b9bb1781e8d342f741b4 --- build/moz.configure/util.configure | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/build/moz.configure/util.configure b/build/moz.configure/util.configure index e3a552d82a293..538f6a6b0aeab 100644 --- a/build/moz.configure/util.configure +++ b/build/moz.configure/util.configure @@ -22,6 +22,7 @@ def configure_error(message): # by running the given command if it exits normally, and streams that # output to log.debug and calls die or the given error callback if it # does not. +@imports(_from='__builtin__', _import='unicode') @imports('subprocess') @imports('sys') @imports(_from='mozbuild.configure.util', _import='LineIO') @@ -29,10 +30,26 @@ def configure_error(message): def check_cmd_output(*args, **kwargs): onerror = kwargs.pop('onerror', None) + # subprocess on older Pythons can't handle unicode keys or values in + # environment dicts. Normalize automagically so callers don't have to + # deal with this. + if 'env' in kwargs: + normalized_env = {} + for k, v in kwargs['env'].items(): + if isinstance(k, unicode): + k = k.encode('utf-8', 'strict') + + if isinstance(v, unicode): + v = v.encode('utf-8', 'strict') + + normalized_env[k] = v + + kwargs['env'] = normalized_env + with log.queue_debug(): log.debug('Executing: `%s`', quote(*args)) proc = subprocess.Popen(args, stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + stderr=subprocess.PIPE, **kwargs) stdout, stderr = proc.communicate() retcode = proc.wait() if retcode == 0: From fa4893f658448b106d3a2c92b3e8d9a1eb94b5b2 Mon Sep 17 00:00:00 2001 From: Gregory Szorc Date: Tue, 18 Jul 2017 18:07:29 -0700 Subject: [PATCH 030/152] Bug 1377216 - Discover version control info in configure; r=glandium For reasons unknown to me, Windows CI is periodically failing to find the Mercurial binary. In addition, we've also reimplemented various VCS logic throughout the build system. 
There is room to cut down on code complexity by e.g. recording VCS info in configure instead of determining it at run-time. Also, for forensic purposes it is sometimes desirable to know which VCS tool is in use by a build and which version of that tool is being used. This commit adds VCS type detection, binary searching, and version resolution to configure. substs now contains VCS_CHECKOUT_TYPE, HG, and GIT, which can be consulted by downstream consumers. If the Mercurial or Git versions could not be resolved, all variables are not set. Otherwise, VCS_CHECKOUT_TYPE and one of HG or GIT is set. If MOZ_AUTOMATION is set, we require that the VCS info be resolved. This helps prevents weirdness in automation due to unexpected environment configuration. MozReview-Commit-ID: AMLy0Hfx5rD --HG-- extra : rebase_source : edef9165d32dc47308a14b0fbabce3c1d3d28176 --- build/moz.configure/init.configure | 80 ++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) diff --git a/build/moz.configure/init.configure b/build/moz.configure/init.configure index 755a5a88d1709..24f95a36b0b06 100644 --- a/build/moz.configure/init.configure +++ b/build/moz.configure/init.configure @@ -309,6 +309,86 @@ def shell(value, mozillabuild): return find_program(shell) +# Source checkout and version control integration. +# ================================================ + +@depends(check_build_environment, 'MOZ_AUTOMATION') +@checking('for vcs source checkout') +@imports('os') +def vcs_checkout_type(build_env, automation): + if os.path.exists(os.path.join(build_env.topsrcdir, '.hg')): + return 'hg' + elif os.path.exists(os.path.join(build_env.topsrcdir, '.git')): + return 'git' + elif automation: + raise FatalCheckError('unable to resolve VCS type; must run ' + 'from a source checkout when MOZ_AUTOMATION ' + 'is set') + +# Resolve VCS binary for detected repository type. 
+hg = check_prog('HG', ('hg',), allow_missing=True, + when=depends(vcs_checkout_type)(lambda x: x == 'hg')) +git = check_prog('GIT', ('git',), allow_missing=True, + when=depends(vcs_checkout_type)(lambda x: x == 'git')) + +@depends_if(hg) +@checking('for Mercurial version') +@imports('os') +@imports('re') +def hg_version(hg): + # HGPLAIN in Mercurial 1.5+ forces stable output, regardless of set + # locale or encoding. + env = dict(os.environ) + env['HGPLAIN'] = '1' + + out = check_cmd_output(hg, '--version', env=env) + + match = re.search(r'Mercurial Distributed SCM \(version ([^\)]+)', out) + + if not match: + raise FatalCheckError('unable to determine Mercurial version: %s' % out) + + # The version string may be "unknown" for Mercurial run out of its own + # source checkout or for bad builds. But LooseVersion handles it. + + return Version(match.group(1)) + +@depends_if(git) +@checking('for Git version') +@imports('re') +def git_version(git): + out = check_cmd_output(git, '--version').rstrip() + + match = re.search('git version (.*)$', out) + + if not match: + raise FatalCheckError('unable to determine Git version: %s' % out) + + return Version(match.group(1)) + +# Only set VCS_CHECKOUT_TYPE if we resolved the VCS binary. +# Require resolved VCS info when running in automation so automation's +# environment is more well-defined. 
+@depends(vcs_checkout_type, hg_version, git_version, 'MOZ_AUTOMATION') +def exposed_vcs_checkout_type(vcs_checkout_type, hg, git, automation): + if vcs_checkout_type == 'hg': + if hg: + return 'hg' + + if automation: + raise FatalCheckError('could not resolve Mercurial binary info') + + elif vcs_checkout_type == 'git': + if git: + return 'git' + + if automation: + raise FatalCheckError('could not resolve Git binary info') + elif vcs_checkout_type: + raise FatalCheckError('unhandled VCS type: %s' % vcs_checkout_type) + +set_config('VCS_CHECKOUT_TYPE', exposed_vcs_checkout_type) + # Host and target systems # ============================================================== option('--host', nargs=1, help='Define the system type performing the build') From 4ed711f73267b1343234384e3949fa0812660f12 Mon Sep 17 00:00:00 2001 From: Gregory Szorc Date: Fri, 14 Jul 2017 17:22:14 -0700 Subject: [PATCH 031/152] Bug 1377216 - Use substs for determining checkout type; r=glandium We now have a variable in config.status for recording the checkout type. These helper functions for determining if we're Mercurial or Git can now be one-liners. As a bonus, we no longer do I/O as part of this function. 
MozReview-Commit-ID: HT9sbOhDEkf --HG-- extra : rebase_source : 8b53b5f50d14c0bdd4ef3dc7b190314af80a76f0 --- python/mozbuild/mozbuild/base.py | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/python/mozbuild/mozbuild/base.py b/python/mozbuild/mozbuild/base.py index 789fe3670b1f1..068beb3519626 100644 --- a/python/mozbuild/mozbuild/base.py +++ b/python/mozbuild/mozbuild/base.py @@ -751,24 +751,12 @@ def is_android(cls): @staticmethod def is_hg(cls): """Must have a mercurial source checkout.""" - if hasattr(cls, 'substs'): - top_srcdir = cls.substs.get('top_srcdir') - elif hasattr(cls, 'topsrcdir'): - top_srcdir = cls.topsrcdir - else: - return False - return top_srcdir and os.path.isdir(os.path.join(top_srcdir, '.hg')) + return getattr(cls, 'substs', {}).get('VCS_CHECKOUT_TYPE') == 'hg' @staticmethod def is_git(cls): """Must have a git source checkout.""" - if hasattr(cls, 'substs'): - top_srcdir = cls.substs.get('top_srcdir') - elif hasattr(cls, 'topsrcdir'): - top_srcdir = cls.topsrcdir - else: - return False - return top_srcdir and os.path.exists(os.path.join(top_srcdir, '.git')) + return getattr(cls, 'substs', {}).get('VCS_CHECKOUT_TYPE') == 'git' class PathArgument(object): From ab7253fb0bfdcb643be731d897163dd50721d9a2 Mon Sep 17 00:00:00 2001 From: Gregory Szorc Date: Fri, 14 Jul 2017 17:23:34 -0700 Subject: [PATCH 032/152] Bug 1377216 - Use substs for resolving path to Mercurial and Git; r=glandium We now store HG or GIT in substs. We don't need to search for binary paths. 
MozReview-Commit-ID: 8sSgPNLok9M --HG-- extra : rebase_source : bc51087bcb9f2a723e27f240dd06a88540f6d8a8 --- python/mozbuild/mozbuild/mach_commands.py | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/python/mozbuild/mozbuild/mach_commands.py b/python/mozbuild/mozbuild/mach_commands.py index fa7b589bbdfcb..897280254d871 100644 --- a/python/mozbuild/mozbuild/mach_commands.py +++ b/python/mozbuild/mozbuild/mach_commands.py @@ -1629,24 +1629,16 @@ def _make_artifacts(self, tree=None, job=None, skip_cache=False): state_dir = self._mach_context.state_dir cache_dir = os.path.join(state_dir, 'package-frontend') - import which - here = os.path.abspath(os.path.dirname(__file__)) build_obj = MozbuildObject.from_environment(cwd=here) hg = None if conditions.is_hg(build_obj): - if self._is_windows(): - hg = which.which('hg.exe') - else: - hg = which.which('hg') + hg = build_obj.substs['HG'] git = None if conditions.is_git(build_obj): - if self._is_windows(): - git = which.which('git.exe') - else: - git = which.which('git') + git = build_obj.substs['GIT'] from mozbuild.artifacts import Artifacts artifacts = Artifacts(tree, self.substs, self.defines, job, From e4480eabc719f6b8c3bf887c3ceb49535d7b2eef Mon Sep 17 00:00:00 2001 From: Gregory Szorc Date: Tue, 18 Jul 2017 15:46:47 -0700 Subject: [PATCH 033/152] Bug 1377216 - Use configure-derived VCS info to find repository; r=glandium For reasons I can't explain, Windows builds are failing intermittently because they are unable to locate the `hg` binary when running some SpiderMonkey test processes. These processes use mozversioncontrol.get_repository_from_env() to locate the current repository. We now store VCS info in configure. This makes it available to anything running in a build system context. This commit teaches mozversioncontrol.get_repository_from_env() to import the "buildconfig" module to locate VCS info. If the module can be imported, it is the sole source of VCS info. 
Otherwise, we fall back to the existing detection mechanisms. This should get rid of the intermittent failure. If it doesn't, it is still a step in the right direction because it will allow build system processes to consistently use a well-defined VCS binary. MozReview-Commit-ID: DMxXheJLRqH --HG-- extra : rebase_source : a9c599934c8c08da1fbb92a9105f5c7cba0867b3 --- .../mozversioncontrol/__init__.py | 56 ++++++++++++++++--- 1 file changed, 49 insertions(+), 7 deletions(-) diff --git a/python/mozversioncontrol/mozversioncontrol/__init__.py b/python/mozversioncontrol/mozversioncontrol/__init__.py index 6fefb44f2911c..a25e018d716c6 100644 --- a/python/mozversioncontrol/mozversioncontrol/__init__.py +++ b/python/mozversioncontrol/mozversioncontrol/__init__.py @@ -13,6 +13,9 @@ def get_tool_path(tool): """Obtain the path of `tool`.""" + if os.path.isabs(tool) and os.path.exists(tool): + return tool + # We use subprocess in places, which expects a Win32 executable or # batch script. On some versions of MozillaBuild, we have "hg.exe", # "hg.bat," and "hg" (a Python script). 
"which" will happily return the @@ -83,8 +86,8 @@ def get_files_in_working_directory(self): class HgRepository(Repository): '''An implementation of `Repository` for Mercurial repositories.''' - def __init__(self, path): - super(HgRepository, self).__init__(path, 'hg') + def __init__(self, path, hg='hg'): + super(HgRepository, self).__init__(path, tool=hg) self._env[b'HGPLAIN'] = b'1' def get_modified_files(self): @@ -112,8 +115,8 @@ def get_files_in_working_directory(self): class GitRepository(Repository): '''An implementation of `Repository` for Git repositories.''' - def __init__(self, path): - super(GitRepository, self).__init__(path, 'git') + def __init__(self, path, git='git'): + super(GitRepository, self).__init__(path, tool=git) def get_modified_files(self): return self._run('diff', '--diff-filter=M', '--name-only').splitlines() @@ -148,8 +151,47 @@ def get_repository_object(path): path) +class MissingVCSInfo(Exception): + """Represents a general failure to resolve a VCS interface.""" + + +class MissingConfigureInfo(MissingVCSInfo): + """Represents error finding VCS info from configure data.""" + + def get_repository_from_env(): - """Obtain a repository object by looking at the environment.""" + """Obtain a repository object by looking at the environment. + + If inside a build environment (denoted by presence of a ``buildconfig`` + module), VCS info is obtained from it, as found via configure. This allows + us to respect what was passed into configure. Otherwise, we fall back to + scanning the filesystem. + """ + try: + import buildconfig + + flavor = buildconfig.substs.get('VCS_CHECKOUT_TYPE') + + # If in build mode, only use what configure found. That way we ensure + # that everything in the build system can be controlled via configure. 
+ if not flavor: + raise MissingConfigureInfo('could not find VCS_CHECKOUT_TYPE ' + 'in build config; check configure ' + 'output and verify it could find a ' + 'VCS binary') + + if flavor == 'hg': + return HgRepository(buildconfig.topsrcdir, + hg=buildconfig.substs['HG']) + elif flavor == 'git': + return GitRepository(buildconfig.topsrcdir, + git=buildconfig.subst['GIT']) + else: + raise MissingVCSInfo('unknown VCS_CHECKOUT_TYPE value: %s' % flavor) + + except ImportError: + pass + def ancestors(path): while path: yield path @@ -163,5 +205,5 @@ def ancestors(path): except InvalidRepoPath: continue - raise Exception('Could not find Mercurial or Git checkout for %s' % - os.getcwd()) + raise MissingVCSInfo('Could not find Mercurial or Git checkout for %s' % + os.getcwd()) From f4e4b2bc681c8f9b8c2a03866e33c61407224c16 Mon Sep 17 00:00:00 2001 From: k88hudson Date: Thu, 20 Jul 2017 14:32:39 -0400 Subject: [PATCH 034/152] Bug 1382746 - Provide a helper function to remove the preloaded browser r=Mardak MozReview-Commit-ID: 2PSIhwvKTKA --HG-- extra : rebase_source : bc24f4c640ee5b8d5f8305f29fde7d56f103070e --- browser/base/content/tabbrowser.xml | 20 ++++++++++++++++--- .../test/browser_background_tab_crash.js | 5 +---- 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/browser/base/content/tabbrowser.xml b/browser/base/content/tabbrowser.xml index 40e9c5b3e510a..713b6b5d39c90 100644 --- a/browser/base/content/tabbrowser.xml +++ b/browser/base/content/tabbrowser.xml @@ -1980,6 +1980,22 @@ + + + + + + null @@ -5694,9 +5710,7 @@ // Preloaded browsers do not actually have any tabs. If one crashes, // it should be released and removed. 
if (browser === this._preloadedBrowser) { - // Calling _getPreloadedBrowser is necessary to actually consume the preloaded browser - let preloaded = this._getPreloadedBrowser(); - preloaded.remove(); + this.removePreloadedBrowser(); return; } diff --git a/browser/components/sessionstore/test/browser_background_tab_crash.js b/browser/components/sessionstore/test/browser_background_tab_crash.js index bd66b94d265b2..9df18999a32f1 100644 --- a/browser/components/sessionstore/test/browser_background_tab_crash.js +++ b/browser/components/sessionstore/test/browser_background_tab_crash.js @@ -234,10 +234,7 @@ add_task(async function test_preload_crash() { }); // Release any existing preloaded browser - let preloaded = gBrowser._getPreloadedBrowser(); - if (preloaded) { - preloaded.remove(); - } + gBrowser.removePreloadedBrowser(); // Create a fresh preloaded browser gBrowser._createPreloadBrowser(); From c00d64f6c6148152f3bf4a1d8e1263f290624ebc Mon Sep 17 00:00:00 2001 From: Andrew McCreight Date: Tue, 18 Jul 2017 10:36:05 -0700 Subject: [PATCH 035/152] Bug 1381921, part 1 - Remove nsIXPConnect::CurrentNativeCallContext. r=mrbkap It is only called in a single place, and can't be called from JS, so inline it and eliminate it. 
MozReview-Commit-ID: DWfyfoO5Zht --HG-- extra : rebase_source : 8a44719af22a4d8724449d6225f4bdd119d648c8 --- js/xpconnect/idl/nsIXPConnect.idl | 3 --- js/xpconnect/src/Sandbox.cpp | 6 ++---- js/xpconnect/src/nsXPConnect.cpp | 9 --------- 3 files changed, 2 insertions(+), 16 deletions(-) diff --git a/js/xpconnect/idl/nsIXPConnect.idl b/js/xpconnect/idl/nsIXPConnect.idl index 2c186c2d6df00..97a415e7ce72c 100644 --- a/js/xpconnect/idl/nsIXPConnect.idl +++ b/js/xpconnect/idl/nsIXPConnect.idl @@ -17,7 +17,6 @@ struct JSFreeOp; class nsWrapperCache; -class nsAXPCNativeCallContext; %} /***************************************************************************/ @@ -32,7 +31,6 @@ class nsAXPCNativeCallContext; native JSEqualityOp(JSEqualityOp); [ptr] native JSScriptPtr(JSScript); [ptr] native voidPtrPtr(void*); -[ptr] native nsAXPCNativeCallContextPtr(nsAXPCNativeCallContext); [ptr] native nsWrapperCachePtr(nsWrapperCache); [ref] native JSCompartmentOptions(JS::CompartmentOptions); [ref] native JSCallArgsRef(const JS::CallArgs); @@ -415,7 +413,6 @@ interface nsIXPConnect : nsISupports // Will return null if there is no JS stack right now. readonly attribute nsIStackFrame CurrentJSStack; - readonly attribute nsAXPCNativeCallContextPtr CurrentNativeCallContext; void debugDump(in short depth); void debugDumpObject(in nsISupports aCOMObj, in short depth); diff --git a/js/xpconnect/src/Sandbox.cpp b/js/xpconnect/src/Sandbox.cpp index 29f3ab5e3dcaf..7ba1bfeb82a5b 100644 --- a/js/xpconnect/src/Sandbox.cpp +++ b/js/xpconnect/src/Sandbox.cpp @@ -1703,15 +1703,13 @@ AssembleSandboxMemoryReporterName(JSContext* cx, nsCString& sandboxName) if (sandboxName.IsEmpty()) sandboxName = NS_LITERAL_CSTRING("[anonymous sandbox]"); - nsXPConnect* xpc = nsXPConnect::XPConnect(); // Get the xpconnect native call context. 
- nsAXPCNativeCallContext* cc = nullptr; - xpc->GetCurrentNativeCallContext(&cc); + XPCCallContext* cc = XPCJSContext::Get()->GetCallContext(); NS_ENSURE_TRUE(cc, NS_ERROR_INVALID_ARG); // Get the current source info from xpc. nsCOMPtr frame; - xpc->GetCurrentJSStack(getter_AddRefs(frame)); + nsXPConnect::XPConnect()->GetCurrentJSStack(getter_AddRefs(frame)); // Append the caller's location information. if (frame) { diff --git a/js/xpconnect/src/nsXPConnect.cpp b/js/xpconnect/src/nsXPConnect.cpp index 887c0ca0bd61a..1c25e19a9ecd9 100644 --- a/js/xpconnect/src/nsXPConnect.cpp +++ b/js/xpconnect/src/nsXPConnect.cpp @@ -836,15 +836,6 @@ nsXPConnect::GetCurrentJSStack(nsIStackFrame * *aCurrentJSStack) return NS_OK; } -NS_IMETHODIMP -nsXPConnect::GetCurrentNativeCallContext(nsAXPCNativeCallContext * *aCurrentNativeCallContext) -{ - MOZ_ASSERT(aCurrentNativeCallContext, "bad param"); - - *aCurrentNativeCallContext = XPCJSContext::Get()->GetCallContext(); - return NS_OK; -} - NS_IMETHODIMP nsXPConnect::SetFunctionThisTranslator(const nsIID & aIID, nsIXPCFunctionThisTranslator* aTranslator) From c77f1723bee9079551141d90431496e959f7d166 Mon Sep 17 00:00:00 2001 From: Andrew McCreight Date: Tue, 18 Jul 2017 10:42:47 -0700 Subject: [PATCH 036/152] Bug 1381921, part 2 - Remove nsAXPCNativeCallContext. r=mrbkap This class is no longer used. 
MozReview-Commit-ID: Lv9AAd3OjIl --HG-- extra : rebase_source : b37abc2c70a2a08b176d0504163c246b6ff3b8cf --- dom/base/nsJSEnvironment.cpp | 1 - dom/base/nsJSTimeoutHandler.cpp | 1 - js/xpconnect/public/moz.build | 1 - js/xpconnect/public/nsAXPCNativeCallContext.h | 32 ---------- js/xpconnect/src/XPCCallContext.cpp | 61 ------------------- js/xpconnect/src/xpcprivate.h | 12 +--- 6 files changed, 1 insertion(+), 107 deletions(-) delete mode 100644 js/xpconnect/public/nsAXPCNativeCallContext.h diff --git a/dom/base/nsJSEnvironment.cpp b/dom/base/nsJSEnvironment.cpp index cd2c739b87c58..0cdffb1079aec 100644 --- a/dom/base/nsJSEnvironment.cpp +++ b/dom/base/nsJSEnvironment.cpp @@ -62,7 +62,6 @@ #include "mozilla/dom/Element.h" #include "mozilla/dom/ErrorEvent.h" #include "mozilla/dom/ScriptSettings.h" -#include "nsAXPCNativeCallContext.h" #include "mozilla/CycleCollectedJSRuntime.h" #include "mozilla/SystemGroup.h" #include "nsRefreshDriver.h" diff --git a/dom/base/nsJSTimeoutHandler.cpp b/dom/base/nsJSTimeoutHandler.cpp index bc46e1ac00a93..e52858b18fe7f 100644 --- a/dom/base/nsJSTimeoutHandler.cpp +++ b/dom/base/nsJSTimeoutHandler.cpp @@ -10,7 +10,6 @@ #include "mozilla/Likely.h" #include "mozilla/Maybe.h" #include "mozilla/dom/FunctionBinding.h" -#include "nsAXPCNativeCallContext.h" #include "nsCOMPtr.h" #include "nsContentUtils.h" #include "nsError.h" diff --git a/js/xpconnect/public/moz.build b/js/xpconnect/public/moz.build index b6650e3953dcc..e91a4a5386547 100644 --- a/js/xpconnect/public/moz.build +++ b/js/xpconnect/public/moz.build @@ -5,7 +5,6 @@ # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
EXPORTS += [ - 'nsAXPCNativeCallContext.h', 'nsTArrayHelpers.h', 'xpc_make_class.h', 'xpc_map_end.h', diff --git a/js/xpconnect/public/nsAXPCNativeCallContext.h b/js/xpconnect/public/nsAXPCNativeCallContext.h deleted file mode 100644 index aeafe7ebefebf..0000000000000 --- a/js/xpconnect/public/nsAXPCNativeCallContext.h +++ /dev/null @@ -1,32 +0,0 @@ -/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ -/* vim: set ts=8 sts=4 et sw=4 tw=99: */ -/* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ - -#ifndef nsAXPCNativeCallContext_h__ -#define nsAXPCNativeCallContext_h__ - -/** -* A native call context is allocated on the stack when XPConnect calls a -* native method. Holding a pointer to this object beyond the currently -* executing stack frame is not permitted. -*/ -class nsAXPCNativeCallContext -{ -public: - NS_IMETHOD GetCallee(nsISupports** aResult) = 0; - NS_IMETHOD GetCalleeMethodIndex(uint16_t* aResult) = 0; - NS_IMETHOD GetJSContext(JSContext** aResult) = 0; - NS_IMETHOD GetArgc(uint32_t* aResult) = 0; - NS_IMETHOD GetArgvPtr(JS::Value** aResult) = 0; - - // Methods added since mozilla 0.6.... - - NS_IMETHOD GetCalleeInterface(nsIInterfaceInfo** aResult) = 0; - NS_IMETHOD GetCalleeClassInfo(nsIClassInfo** aResult) = 0; - - NS_IMETHOD GetPreviousCallContext(nsAXPCNativeCallContext** aResult) = 0; -}; - -#endif diff --git a/js/xpconnect/src/XPCCallContext.cpp b/js/xpconnect/src/XPCCallContext.cpp index 16f8341ac4f4a..dbbd500717460 100644 --- a/js/xpconnect/src/XPCCallContext.cpp +++ b/js/xpconnect/src/XPCCallContext.cpp @@ -213,64 +213,3 @@ XPCCallContext::~XPCCallContext() MOZ_ASSERT(old == this, "bad pop from per thread data"); } } - -NS_IMETHODIMP -XPCCallContext::GetCallee(nsISupports * *aCallee) -{ - nsCOMPtr rval = mWrapper ? 
mWrapper->GetIdentityObject() : nullptr; - rval.forget(aCallee); - return NS_OK; -} - -NS_IMETHODIMP -XPCCallContext::GetCalleeMethodIndex(uint16_t* aCalleeMethodIndex) -{ - *aCalleeMethodIndex = mMethodIndex; - return NS_OK; -} - -NS_IMETHODIMP -XPCCallContext::GetCalleeInterface(nsIInterfaceInfo * *aCalleeInterface) -{ - nsCOMPtr rval = mInterface->GetInterfaceInfo(); - rval.forget(aCalleeInterface); - return NS_OK; -} - -NS_IMETHODIMP -XPCCallContext::GetCalleeClassInfo(nsIClassInfo * *aCalleeClassInfo) -{ - nsCOMPtr rval = mWrapper ? mWrapper->GetClassInfo() : nullptr; - rval.forget(aCalleeClassInfo); - return NS_OK; -} - -NS_IMETHODIMP -XPCCallContext::GetJSContext(JSContext * *aJSContext) -{ - JS_AbortIfWrongThread(mJSContext); - *aJSContext = mJSContext; - return NS_OK; -} - -NS_IMETHODIMP -XPCCallContext::GetArgc(uint32_t* aArgc) -{ - *aArgc = (uint32_t) mArgc; - return NS_OK; -} - -NS_IMETHODIMP -XPCCallContext::GetArgvPtr(Value** aArgvPtr) -{ - *aArgvPtr = mArgv; - return NS_OK; -} - -NS_IMETHODIMP -XPCCallContext::GetPreviousCallContext(nsAXPCNativeCallContext** aResult) -{ - NS_ENSURE_ARG_POINTER(aResult); - *aResult = GetPrevCallContext(); - return NS_OK; -} diff --git a/js/xpconnect/src/xpcprivate.h b/js/xpconnect/src/xpcprivate.h index e759f3d29641d..0902fed706d12 100644 --- a/js/xpconnect/src/xpcprivate.h +++ b/js/xpconnect/src/xpcprivate.h @@ -151,7 +151,6 @@ #include "SandboxPrivate.h" #include "BackstagePass.h" -#include "nsAXPCNativeCallContext.h" #ifdef XP_WIN // Nasty MS defines @@ -704,18 +703,9 @@ XPCJSContext::GetStringName(unsigned index) const // // Note that most accessors are inlined. 
-class MOZ_STACK_CLASS XPCCallContext final : public nsAXPCNativeCallContext +class MOZ_STACK_CLASS XPCCallContext final { public: - NS_IMETHOD GetCallee(nsISupports** aResult); - NS_IMETHOD GetCalleeMethodIndex(uint16_t* aResult); - NS_IMETHOD GetJSContext(JSContext** aResult); - NS_IMETHOD GetArgc(uint32_t* aResult); - NS_IMETHOD GetArgvPtr(JS::Value** aResult); - NS_IMETHOD GetCalleeInterface(nsIInterfaceInfo** aResult); - NS_IMETHOD GetCalleeClassInfo(nsIClassInfo** aResult); - NS_IMETHOD GetPreviousCallContext(nsAXPCNativeCallContext** aResult); - enum {NO_ARGS = (unsigned) -1}; explicit XPCCallContext(JSContext* cx, From 6f0154d4c2534b555c37f4f9f27ff0e472b5c3e7 Mon Sep 17 00:00:00 2001 From: Gregory Szorc Date: Thu, 20 Jul 2017 15:08:20 -0700 Subject: [PATCH 037/152] Bug 1377216 - Add dependency on --help to vcs_checkout_type; r=glandium This is now required as of 00ef8018730c. Landing on a CLOSED TREE. --HG-- extra : rebase_source : 410e4a677cdc1f1238f778a00078ac6e6de420bd extra : amend_source : e81cf510948bedd76ac77790eb96aae1fe3cda1c --- build/moz.configure/init.configure | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build/moz.configure/init.configure b/build/moz.configure/init.configure index 24f95a36b0b06..9482f7da976f8 100644 --- a/build/moz.configure/init.configure +++ b/build/moz.configure/init.configure @@ -312,10 +312,10 @@ def shell(value, mozillabuild): # Source checkout and version control integration. 
# ================================================ -@depends(check_build_environment, 'MOZ_AUTOMATION') +@depends(check_build_environment, 'MOZ_AUTOMATION', '--help') @checking('for vcs source checkout') @imports('os') -def vcs_checkout_type(build_env, automation): +def vcs_checkout_type(build_env, automation, _): if os.path.exists(os.path.join(build_env.topsrcdir, '.hg')): return 'hg' elif os.path.exists(os.path.join(build_env.topsrcdir, '.git')): From 94dfc3b4c7019d82d4e94701564e280fd722bb16 Mon Sep 17 00:00:00 2001 From: Wes Kocher Date: Thu, 20 Jul 2017 15:42:46 -0700 Subject: [PATCH 038/152] Bug 1358670 - Turn these jobs to tier-3 until they get cleaned up a=bustage CLOSED TREE MozReview-Commit-ID: 2ulyyrULBEY --HG-- extra : amend_source : 965f74488ab64d360a9cabdbcd693b5c088a8186 --- taskcluster/ci/test/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/taskcluster/ci/test/tests.yml b/taskcluster/ci/test/tests.yml index ce8f13534fbfb..d8c6b88af596d 100644 --- a/taskcluster/ci/test/tests.yml +++ b/taskcluster/ci/test/tests.yml @@ -1516,7 +1516,7 @@ telemetry-tests-client: max-run-time: 5400 checkout: true e10s: true - tier: 2 + tier: 3 docker-image: {"in-tree": "desktop1604-test"} mozharness: script: telemetry/telemetry_client.py From 4d3b4a7b1a36f9957620a6129d48923d5aae3d16 Mon Sep 17 00:00:00 2001 From: Wes Kocher Date: Thu, 20 Jul 2017 15:47:48 -0700 Subject: [PATCH 039/152] Bug 1341102 - Update expectation data a=bustage CLOSED TREE MozReview-Commit-ID: 603WKr3HLo0 --- layout/style/test/stylo-failures.md | 1 - 1 file changed, 1 deletion(-) diff --git a/layout/style/test/stylo-failures.md b/layout/style/test/stylo-failures.md index 1590b1c97b0fd..98b5b2efdd00d 100644 --- a/layout/style/test/stylo-failures.md +++ b/layout/style/test/stylo-failures.md @@ -56,7 +56,6 @@ to mochitest command. 
* test_computed_style.html `gradient` [1] * Unit should be preserved after parsing servo/servo#15346 * test_units_time.html [1] -* test_css_supports.html: issues around @supports syntax servo/servo#15482 [2] * test_author_specified_style.html: support serializing color as author specified bug 1348165 [27] * browser_newtab_share_rule_processors.js: agent style sheet sharing [1] * :visited support bug 1381635 From b630c357252b7f6bc8256efc0f7bc827b62f714c Mon Sep 17 00:00:00 2001 From: "J. Ryan Stinnett" Date: Thu, 20 Jul 2017 18:00:23 -0500 Subject: [PATCH 040/152] Bug 1341102 - Update reftest annotations. r=me CLOSED TREE MozReview-Commit-ID: KmLgQcOK9Jf --- layout/reftests/bugs/reftest.list | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/layout/reftests/bugs/reftest.list b/layout/reftests/bugs/reftest.list index b9554cb74c668..84f86e3a2db9c 100644 --- a/layout/reftests/bugs/reftest.list +++ b/layout/reftests/bugs/reftest.list @@ -1780,11 +1780,11 @@ fuzzy-if(OSX,1,364) fuzzy-if(skiaContent,1,320) == 846144-1.html 846144-1-ref.ht != 860370.html 860370-notref.html == 871338-1.html 871338-1-ref.html == 875060-1.html 875060-1-ref.html -fails-if(styloVsGecko||stylo) == 883987-1a.html 883987-1-ref.html +== 883987-1a.html 883987-1-ref.html fails-if(styloVsGecko||stylo) == 883987-1b.html 883987-1-ref.html fails-if(styloVsGecko||stylo) == 883987-1c.html 883987-1-ref.html -fails-if(styloVsGecko||stylo) == 883987-1d.html 883987-1-ref.html -fails-if(styloVsGecko||stylo) == 883987-1e.html 883987-1-ref.html +== 883987-1d.html 883987-1-ref.html +== 883987-1e.html 883987-1-ref.html == 883987-1f.html 883987-1-ref.html == 890495-1.html 890495-1-ref.html == 894931-1.html 894931-1-ref.html From b83e245b8d647cb00c0ea55a873272a4f3c006d6 Mon Sep 17 00:00:00 2001 From: Hiroyuki Ikezoe Date: Thu, 20 Jul 2017 14:21:34 -0700 Subject: [PATCH 041/152] =?UTF-8?q?servo:=20Merge=20#17793=20-=20Call=20ma?= 
=?UTF-8?q?y=5Fhave=5Fanimations()=20for=20parent=20element=20in=20the=20c?= =?UTF-8?q?ase=20where=20the=20t=E2=80=A6=20(from=20hiikezoe:may-have-anim?= =?UTF-8?q?ations-check);=20r=3Dbirtles?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit …arget is pseudo element. In case of pseudo elements ElementHasAnimations is set on the parent element. updating-animation-on-pseudo-element.html fails without this patch, succeeds with this patch. https://bugzilla.mozilla.org/show_bug.cgi?id=1367278 --- - [X] `./mach build -d` does not report any errors - [X] `./mach test-tidy` does not report any errors Source-Repo: https://github.com/servo/servo Source-Revision: a303696ae3273dfd32c8663c6147692696bdbc08 --HG-- extra : subtree_source : https%3A//hg.mozilla.org/projects/converted-servo-linear extra : subtree_revision : 37101ba76a297f34c1cb86f68d81b7e5f69144e2 --- servo/components/style/gecko/wrapper.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/servo/components/style/gecko/wrapper.rs b/servo/components/style/gecko/wrapper.rs index 9536ed6277acc..59f128086ed19 100644 --- a/servo/components/style/gecko/wrapper.rs +++ b/servo/components/style/gecko/wrapper.rs @@ -1089,6 +1089,16 @@ impl<'le> TElement for GeckoElement<'le> { #[inline] fn may_have_animations(&self) -> bool { + if let Some(pseudo) = self.implemented_pseudo_element() { + if !pseudo.is_before_or_after() { + return false; + } + return self.parent_element() + .map_or(false, |p| { + p.as_node() + .get_bool_flag(nsINode_BooleanFlag::ElementHasAnimations) + }); + } self.as_node().get_bool_flag(nsINode_BooleanFlag::ElementHasAnimations) } From eed2b90152804ed62ec855aaf2941c38b4f2618d Mon Sep 17 00:00:00 2001 From: Manish Goregaokar Date: Thu, 20 Jul 2017 16:07:51 -0700 Subject: [PATCH 042/152] servo: Merge #17809 - Stop asserting when cached system font changes (from Manishearth:stylo-system-anim); r=hiikezoe It may change during animation fixes 
https://bugzilla.mozilla.org/show_bug.cgi?id=1382672 Source-Repo: https://github.com/servo/servo Source-Revision: 4616f4ae772b15ed576ba4079638fcfdec4ac3de --HG-- extra : subtree_source : https%3A//hg.mozilla.org/projects/converted-servo-linear extra : subtree_revision : 7ee5fd50fc33d600ab3f3713b01efb2acf6baffd --- servo/components/style/properties/longhand/font.mako.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/servo/components/style/properties/longhand/font.mako.rs b/servo/components/style/properties/longhand/font.mako.rs index 39f611e7d02ac..538ffe3b0438c 100644 --- a/servo/components/style/properties/longhand/font.mako.rs +++ b/servo/components/style/properties/longhand/font.mako.rs @@ -2489,11 +2489,13 @@ ${helpers.single_keyword("-moz-math-variant", /// Must be called before attempting to compute a system font /// specified value pub fn resolve_system_font(system: SystemFont, context: &mut Context) { - if context.cached_system_font.is_none() { + // Checking if context.cached_system_font.is_none() isn't enough, + // if animating from one system font to another the cached system font + // may change + if Some(system) != context.cached_system_font.as_ref().map(|x| x.system_font) { let computed = system.to_computed_value(context); context.cached_system_font = Some(computed); } - debug_assert!(system == context.cached_system_font.as_ref().unwrap().system_font) } #[derive(Clone, Debug)] From 963412d5e3bb1335c4f20b3d539996ff05539b15 Mon Sep 17 00:00:00 2001 From: Jim Chen Date: Thu, 20 Jul 2017 18:24:12 -0400 Subject: [PATCH 043/152] Bug 1381924 - Include WebRTC bits for GeckoView; r=nalexander Include necessary WebRTC files and permissions for GeckoView. For permissions, we need to add the RECORD_AUDIO permission to GeckoView's AndroidManifest.xml, but since the file is not preprocessed, we can't use an `#ifdef MOZ_WEBRTC` block, so I think we'll just have to unconditionally include the permission. 
MozReview-Commit-ID: IUd8FFMsW99 --HG-- extra : rebase_source : b75462d53e6bd05b324e8551c888853c8678ec6b --- mobile/android/app/build.gradle | 7 ------- mobile/android/base/Makefile.in | 8 ++++---- mobile/android/geckoview/build.gradle | 11 +++++------ .../geckoview/src/main/AndroidManifest.xml | 16 +++++++++------- 4 files changed, 18 insertions(+), 24 deletions(-) diff --git a/mobile/android/app/build.gradle b/mobile/android/app/build.gradle index a6d702af64f2b..bdf9b89e3842a 100644 --- a/mobile/android/app/build.gradle +++ b/mobile/android/app/build.gradle @@ -148,13 +148,6 @@ android { exclude 'org/mozilla/gecko/MediaPlayerManager.java' } - if (mozconfig.substs.MOZ_WEBRTC) { - srcDir "${topsrcdir}/media/webrtc/trunk/webrtc/base/java/src" - srcDir "${topsrcdir}/media/webrtc/trunk/webrtc/modules/audio_device/android/java/src" - srcDir "${topsrcdir}/media/webrtc/trunk/webrtc/modules/video_capture/android/java/src" - srcDir "${topsrcdir}/media/webrtc/trunk/webrtc/modules/video_render/android/java/src" - } - if (mozconfig.substs.MOZ_INSTALL_TRACKING) { exclude 'org/mozilla/gecko/adjust/StubAdjustHelper.java' } else { diff --git a/mobile/android/base/Makefile.in b/mobile/android/base/Makefile.in index 7a8f257be14af..8244caa9bb3c6 100644 --- a/mobile/android/base/Makefile.in +++ b/mobile/android/base/Makefile.in @@ -154,6 +154,10 @@ ifdef MOZ_ANDROID_HLS_SUPPORT GECKOVIEW_JARS += exoplayer2.jar endif +ifdef MOZ_WEBRTC +GECKOVIEW_JARS += webrtc.jar +endif + ifdef MOZ_INSTALL_TRACKING GECKOVIEW_JARS += gecko-thirdparty-adjust_sdk.jar endif @@ -170,10 +174,6 @@ FENNEC_JARS = \ services.jar \ $(NULL) -ifdef MOZ_WEBRTC -FENNEC_JARS += webrtc.jar -endif - ifdef MOZ_ANDROID_SEARCH_ACTIVITY FENNEC_JARS += search-activity.jar endif diff --git a/mobile/android/geckoview/build.gradle b/mobile/android/geckoview/build.gradle index 640b7e37f3044..f8971d5758b1d 100644 --- a/mobile/android/geckoview/build.gradle +++ b/mobile/android/geckoview/build.gradle @@ -79,12 +79,11 @@ android 
{ exclude 'com/google/android/exoplayer2/**' } - // TODO: support WebRTC. - // if (mozconfig.substs.MOZ_WEBRTC) { - // srcDir "${topsrcdir}/media/webrtc/trunk/webrtc/modules/audio_device/android/java/src" - // srcDir "${topsrcdir}/media/webrtc/trunk/webrtc/modules/video_capture/android/java/src" - // srcDir "${topsrcdir}/media/webrtc/trunk/webrtc/modules/video_render/android/java/src" - // } + if (mozconfig.substs.MOZ_WEBRTC) { + srcDir "${topsrcdir}/media/webrtc/trunk/webrtc/base/java/src" + srcDir "${topsrcdir}/media/webrtc/trunk/webrtc/modules/audio_device/android/java/src" + srcDir "${topsrcdir}/media/webrtc/trunk/webrtc/modules/video_capture/android/java/src" + } // TODO: don't use AppConstants. srcDir "${project.buildDir}/generated/source/preprocessed_code" // See syncPreprocessedCode. diff --git a/mobile/android/geckoview/src/main/AndroidManifest.xml b/mobile/android/geckoview/src/main/AndroidManifest.xml index 62137f7656149..f430f4a3e38bd 100644 --- a/mobile/android/geckoview/src/main/AndroidManifest.xml +++ b/mobile/android/geckoview/src/main/AndroidManifest.xml @@ -22,17 +22,19 @@ - - - - - - - + + + --> + --> + --> + --> + + From ec84922bddc590a5d47a6e511ba1219f30a8c82b Mon Sep 17 00:00:00 2001 From: Jim Chen Date: Thu, 20 Jul 2017 17:52:13 -0400 Subject: [PATCH 044/152] Bug 1322586 - 1. Add "always listen" option for GeckoViewHandler; r=esawin It would be nice to reuse GeckoViewHandler for delegates too (e.g. for the new PermissionDelegate), but delegates need to always listen to their events. This patch adds an "always listen" option for GeckoViewHandler that still keeps track of the current listener/delegate instance, but registers for all events at initialization rather than only when there is a listener. 
MozReview-Commit-ID: JtskpZMy9T4 --HG-- extra : rebase_source : 99170bc0b19d605dde4dbf08690bb31d310ed113 --- .../org/mozilla/gecko/GeckoViewHandler.java | 35 +++++++++++++++---- 1 file changed, 28 insertions(+), 7 deletions(-) diff --git a/mobile/android/geckoview/src/main/java/org/mozilla/gecko/GeckoViewHandler.java b/mobile/android/geckoview/src/main/java/org/mozilla/gecko/GeckoViewHandler.java index e829ff1926d92..9e4b5e5ff2136 100644 --- a/mobile/android/geckoview/src/main/java/org/mozilla/gecko/GeckoViewHandler.java +++ b/mobile/android/geckoview/src/main/java/org/mozilla/gecko/GeckoViewHandler.java @@ -20,13 +20,25 @@ private static final boolean DEBUG = false; private Listener mListener; + private final boolean mAlwaysListen; private final String mModuleName; private final String[] mEvents; + GeckoViewHandler(final String module, final GeckoView view, final String[] events) { + this(module, view, events, /* alwaysListen */ false); + } + + GeckoViewHandler(final String module, final GeckoView view, + final String[] events, final boolean alwaysListen) { + mAlwaysListen = alwaysListen; mModuleName = module; mEvents = events; + + if (alwaysListen) { + register(view.getEventDispatcher()); + } } public Listener getListener() { @@ -35,20 +47,29 @@ public Listener getListener() { public void setListener(final Listener listener, final GeckoView view) { final EventDispatcher eventDispatcher = view.getEventDispatcher(); + if (mListener == listener) { + return; + } - if (mListener != null && mListener != listener) { - final GeckoBundle msg = new GeckoBundle(1); - msg.putString("module", mModuleName); - eventDispatcher.dispatch("GeckoView:Unregister", msg); - eventDispatcher.unregisterUiThreadListener(this, mEvents); + if (!mAlwaysListen && mListener != null) { + unregister(eventDispatcher); } mListener = listener; - if (mListener == null) { - return; + if (!mAlwaysListen && mListener != null) { + register(eventDispatcher); } + } + + private void unregister(final 
EventDispatcher eventDispatcher) { + final GeckoBundle msg = new GeckoBundle(1); + msg.putString("module", mModuleName); + eventDispatcher.dispatch("GeckoView:Unregister", msg); + eventDispatcher.unregisterUiThreadListener(this, mEvents); + } + private void register(final EventDispatcher eventDispatcher) { final GeckoBundle msg = new GeckoBundle(1); msg.putString("module", mModuleName); eventDispatcher.dispatch("GeckoView:Register", msg); From 808114784e0a3111160da7fe05bbfde61c5513c4 Mon Sep 17 00:00:00 2001 From: Jim Chen Date: Thu, 20 Jul 2017 17:52:13 -0400 Subject: [PATCH 045/152] Bug 1322586 - 2. Support content event forwarding in Messaging.jsm; r=esawin For a lot of GeckoView content process code, we send a message from content to chrome, using a message manager, only to forward the message to Java as an event, using an event dispatcher. This patch gives Messaging.jsm the ability to be used in content process directly, and lets events transparently pass through to Java. MozReview-Commit-ID: 15dKKpQCXqJ --HG-- extra : rebase_source : 3ab93052de3676244843c55c50ae5ade63ab8f11 --- .../android/modules/geckoview/Messaging.jsm | 86 +++++++++++++++++-- 1 file changed, 81 insertions(+), 5 deletions(-) diff --git a/mobile/android/modules/geckoview/Messaging.jsm b/mobile/android/modules/geckoview/Messaging.jsm index a5edb570c52d4..a59fcef7efcc2 100644 --- a/mobile/android/modules/geckoview/Messaging.jsm +++ b/mobile/android/modules/geckoview/Messaging.jsm @@ -6,15 +6,19 @@ const { classes: Cc, interfaces: Ci, utils: Cu } = Components; Cu.import("resource://gre/modules/Services.jsm"); -Cu.import("resource://gre/modules/Task.jsm"); Cu.import("resource://gre/modules/XPCOMUtils.jsm"); this.EXPORTED_SYMBOLS = ["sendMessageToJava", "Messaging", "EventDispatcher"]; -XPCOMUtils.defineLazyServiceGetter(this, "uuidgen", +XPCOMUtils.defineLazyModuleGetter(this, "Task", "resource://gre/modules/Task.jsm"); + +XPCOMUtils.defineLazyServiceGetter(this, "UUIDGen", 
"@mozilla.org/uuid-generator;1", "nsIUUIDGenerator"); +const IS_PARENT_PROCESS = (Services.appinfo.processType == + Services.appinfo.PROCESS_TYPE_DEFAULT); + function sendMessageToJava(aMessage, aCallback) { Cu.reportError("sendMessageToJava is deprecated. Use EventDispatcher instead."); @@ -39,6 +43,9 @@ DispatcherDelegate.prototype = { * @param events String or array of strings of events to listen to. */ registerListener: function (listener, events) { + if (!IS_PARENT_PROCESS) { + throw new Error("Can only listen in parent process"); + } this._dispatcher.registerListener(listener, events); }, @@ -49,6 +56,9 @@ DispatcherDelegate.prototype = { * @param events String or array of strings of events to stop listening to. */ unregisterListener: function (listener, events) { + if (!IS_PARENT_PROCESS) { + throw new Error("Can only listen in parent process"); + } this._dispatcher.unregisterListener(listener, events); }, @@ -62,6 +72,33 @@ DispatcherDelegate.prototype = { * @param callback Optional callback implementing nsIAndroidEventCallback. 
*/ dispatch: function (event, data, callback) { + if (!IS_PARENT_PROCESS) { + let mm = this._dispatcher || Services.cpmm; + let data = { + global: !this._dispatcher, + event: event, + data: data, + }; + + if (callback) { + data.uuid = UUIDGen.generateUUID().toString(); + mm.addMessageListener("GeckoView:MessagingReply", function listener(msg) { + if (msg.data.uuid === data.uuid) { + mm.removeMessageListener(msg.name, listener); + if (msg.data.type === "success") { + callback.onSuccess(msg.data.response); + } else if (msg.data.type === "error") { + callback.onError(msg.data.response); + } else { + throw new Error("invalid reply type"); + } + } + }); + } + + mm.sendAsyncMessage("GeckoView:Messaging", data); + return; + } this._dispatcher.dispatch(event, data, callback); }, @@ -92,8 +129,8 @@ DispatcherDelegate.prototype = { msg.type = undefined; this.dispatch(type, msg, { - onSuccess: response => resolve(response), - onError: response => reject(response) + onSuccess: resolve, + onError: reject, }); }); }, @@ -175,9 +212,15 @@ DispatcherDelegate.prototype = { }; var EventDispatcher = { - instance: new DispatcherDelegate(Services.androidBridge), + instance: new DispatcherDelegate(IS_PARENT_PROCESS ? Services.androidBridge : undefined), for: function (window) { + if (!IS_PARENT_PROCESS) { + if (!window.messageManager) { + throw new Error("window does not have message manager"); + } + return new DispatcherDelegate(window.messageManager); + } let view = window && window.arguments && window.arguments[0] && window.arguments[0].QueryInterface(Ci.nsIAndroidView); if (!view) { @@ -185,8 +228,41 @@ var EventDispatcher = { } return new DispatcherDelegate(view); }, + + receiveMessage: function (aMsg) { + // aMsg.data includes keys: global, event, data, uuid + let callback; + if (aMsg.data.uuid) { + let reply = (type, response) => { + let mm = aMsg.data.global ? 
aMsg.target : aMsg.target.messageManager; + mm.sendAsyncMessage("GeckoView:MessagingReply", { + type: type, + response: response, + uuid: aMsg.data.uuid, + }); + }; + callback = { + onSuccess: response => reply("success", response), + onError: error => reply("error", error), + }; + } + + if (aMsg.data.global) { + this.instance.dispatch(aMsg.data.event, aMsg.data.data. callback); + return; + } + + let win = aMsg.target.contentWindow || aMsg.target.ownerGlobal; + let dispatcher = win.WindowEventDispatcher || this.for(win); + dispatcher.dispatch(aMsg.data.event, aMsg.data.data, callback); + }, }; +if (IS_PARENT_PROCESS) { + Services.mm.addMessageListener("GeckoView:Messaging", EventDispatcher); + Services.ppmm.addMessageListener("GeckoView:Messaging", EventDispatcher); +} + // For backwards compatibility. var Messaging = {}; From 931069320f0f498955b4994128de6044c3e0fcf4 Mon Sep 17 00:00:00 2001 From: Jim Chen Date: Thu, 20 Jul 2017 17:52:14 -0400 Subject: [PATCH 046/152] Bug 1322586 - 3. Add GeckoViewPermission.js for handling permissions; r=esawin Add GeckoViewPermission.js, which is responsible for handling requests from Gecko for permissions, and passing those requests to GeckoView. 
MozReview-Commit-ID: C0g24hKEYbf --HG-- extra : rebase_source : fe949fde01ddba207b12f2bd49e52ea2bc8b8c5e --- .../components/geckoview/GeckoView.manifest | 5 + .../geckoview/GeckoViewPermission.js | 250 ++++++++++++++++++ mobile/android/components/geckoview/moz.build | 1 + mobile/android/installer/package-manifest.in | 1 + 4 files changed, 257 insertions(+) create mode 100644 mobile/android/components/geckoview/GeckoViewPermission.js diff --git a/mobile/android/components/geckoview/GeckoView.manifest b/mobile/android/components/geckoview/GeckoView.manifest index 680a31a6a1541..5861980f36a0a 100644 --- a/mobile/android/components/geckoview/GeckoView.manifest +++ b/mobile/android/components/geckoview/GeckoView.manifest @@ -1,6 +1,11 @@ # Stylesheets category agent-style-sheets browser-content-stylesheet chrome://geckoview/skin/content.css +# GeckoViewPermission.js +component {42f3c238-e8e8-4015-9ca2-148723a8afcf} GeckoViewPermission.js +contract @mozilla.org/content-permission/prompt;1 {42f3c238-e8e8-4015-9ca2-148723a8afcf} +category app-startup GeckoViewPermission service,@mozilla.org/content-permission/prompt;1 + # GeckoViewPrompt.js component {076ac188-23c1-4390-aa08-7ef1f78ca5d9} GeckoViewPrompt.js contract @mozilla.org/embedcomp/prompt-service;1 {076ac188-23c1-4390-aa08-7ef1f78ca5d9} diff --git a/mobile/android/components/geckoview/GeckoViewPermission.js b/mobile/android/components/geckoview/GeckoViewPermission.js new file mode 100644 index 0000000000000..fa2e161fb120c --- /dev/null +++ b/mobile/android/components/geckoview/GeckoViewPermission.js @@ -0,0 +1,250 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +const {classes: Cc, interfaces: Ci, utils: Cu, results: Cr} = Components; + +Cu.import("resource://gre/modules/Services.jsm"); +Cu.import("resource://gre/modules/XPCOMUtils.jsm"); + +XPCOMUtils.defineLazyModuleGetter(this, "EventDispatcher", + "resource://gre/modules/Messaging.jsm"); + +// See: http://developer.android.com/reference/android/Manifest.permission.html +const PERM_ACCESS_FINE_LOCATION = "android.permission.ACCESS_FINE_LOCATION"; +const PERM_CAMERA = "android.permission.CAMERA"; +const PERM_RECORD_AUDIO = "android.permission.RECORD_AUDIO"; + +function GeckoViewPermission() { +} + +GeckoViewPermission.prototype = { + classID: Components.ID("{42f3c238-e8e8-4015-9ca2-148723a8afcf}"), + + QueryInterface: XPCOMUtils.generateQI([ + Ci.nsIObserver, Ci.nsIContentPermissionPrompt]), + + _appPermissions: {}, + + /* ---------- nsIObserver ---------- */ + observe: function(aSubject, aTopic, aData) { + switch (aTopic) { + case "app-startup": { + Services.obs.addObserver(this, "getUserMedia:ask-device-permission"); + Services.obs.addObserver(this, "getUserMedia:request"); + Services.obs.addObserver(this, "PeerConnection:request"); + break; + } + case "getUserMedia:ask-device-permission": { + this.handleMediaAskDevicePermission(aData, aSubject); + break; + } + case "getUserMedia:request": { + this.handleMediaRequest(aSubject); + break; + } + case "PeerConnection:request": { + this.handlePeerConnectionRequest(aSubject); + break; + } + } + }, + + handleMediaAskDevicePermission: function(aType, aCallback) { + let perms = []; + if (aType === "video" || aType === "all") { + perms.push(PERM_CAMERA); + } + if (aType === "audio" || aType === "all") { + perms.push(PERM_RECORD_AUDIO); + } + + let dispatcher = this.getActiveDispatcher(); + let callback = _ => { + Services.obs.notifyObservers(aCallback, "getUserMedia:got-device-permission"); + }; + + if (dispatcher) { + this.getAppPermissions(dispatcher, perms).then(callback, callback); + } else { + // No dispatcher; just 
bail. + callback(); + } + }, + + handleMediaRequest: function(aRequest) { + let constraints = aRequest.getConstraints(); + let callId = aRequest.callID; + let denyRequest = _ => { + Services.obs.notifyObservers(null, "getUserMedia:response:deny", callId); + }; + + let win = Services.wm.getOuterWindowWithId(aRequest.windowID); + new Promise((resolve, reject) => { + win.navigator.mozGetUserMediaDevices(constraints, resolve, reject, + aRequest.innerWindowID, callId); + // Release the request first. + aRequest = undefined; + }).then(devices => { + if (win.closed) { + return; + } + + let sources = devices.map(device => { + device = device.QueryInterface(Ci.nsIMediaDevice); + return { + type: device.type, + id: device.id, + rawId: device.rawId, + name: device.name, + mediaSource: device.mediaSource, + }; + }); + + if (constraints.video && !sources.some(source => source.type === "video")) { + throw "no video source"; + } else if (constraints.audio && !sources.some(source => source.type === "audio")) { + throw "no audio source"; + } + + let dispatcher = this.getDispatcherForWindow(win); + let uri = win.document.documentURIObject; + return dispatcher.sendRequestForResult({ + type: "GeckoView:MediaPermission", + uri: uri.spec, + video: constraints.video ? sources.filter(source => source.type === "video") : null, + audio: constraints.audio ? sources.filter(source => source.type === "audio") : null, + }).then(response => { + if (!response) { + // Rejected. + denyRequest(); + return; + } + let allowedDevices = Cc["@mozilla.org/array;1"].createInstance(Ci.nsIMutableArray); + if (constraints.video) { + let video = devices.find(device => response.video === device.id); + if (!video) { + throw new Error("invalid video id"); + } + // Although the lifetime is "session" it will be removed upon + // use so it's more of a one-shot. 
+ Services.perms.add(uri, "MediaManagerVideo", + Services.perms.ALLOW_ACTION, + Services.perms.EXPIRE_SESSION); + allowedDevices.appendElement(video); + } + if (constraints.audio) { + let audio = devices.find(device => response.audio === device.id); + if (!audio) { + throw new Error("invalid audio id"); + } + allowedDevices.appendElement(audio); + } + Services.obs.notifyObservers( + allowedDevices, "getUserMedia:response:allow", callId); + }); + }).catch(error => { + Cu.reportError("Media device error: " + error); + denyRequest(); + }); + }, + + handlePeerConnectionRequest: function(aRequest) { + Services.obs.notifyObservers(null, "PeerConnection:response:allow", aRequest.callID); + }, + + getActiveDispatcher: function() { + let getDispatcher = win => { + try { + let dispatcher = win.WindowEventDispatcher || EventDispatcher.for(win); + if (!win.closed && dispatcher) { + return dispatcher; + } + } catch (e) { + // Ignore. + } + return null; + }; + + let dispatcher = getDispatcher(Services.focus.activeWindow.top); + if (dispatcher) { + return dispatcher; + } + + let iter = Services.wm.getEnumerator(/* windowType */ null); + while (iter.hasMoreElements()) { + dispatcher = getDispatcher(iter.getNext().QueryInterface(Ci.nsIDOMWindow).top); + if (dispatcher) { + return dispatcher; + } + } + return null; + }, + + getDispatcherForWindow: function(aWin) { + aWin = aWin.QueryInterface(Ci.nsIInterfaceRequestor) + .getInterface(Ci.nsIDocShell).QueryInterface(Ci.nsIDocShellTreeItem) + .rootTreeItem.QueryInterface(Ci.nsIInterfaceRequestor) + .getInterface(Ci.nsIDOMWindow); + return aWin.WindowEventDispatcher || EventDispatcher.for(aWin); + }, + + checkAppPermissions: function(aPerms) { + return aPerms.every(perm => this._appPermissions[perm]); + }, + + getAppPermissions: function(aDispatcher, aPerms) { + let perms = aPerms.filter(perm => !this._appPermissions[perm]); + if (!perms.length) { + return Promise.resolve(/* granted */ true); + } + return 
aDispatcher.sendRequestForResult({ + type: "GeckoView:AndroidPermission", + perms: perms, + }).then(granted => { + if (granted) { + for (let perm of perms) { + this._appPermissions[perm] = true; + } + } + return granted; + }); + }, + + prompt: function(aRequest) { + // Only allow exactly one permission request here. + let types = aRequest.types.QueryInterface(Ci.nsIArray); + if (types.length !== 1) { + aRequest.cancel(); + return; + } + + let perm = types.queryElementAt(0, Ci.nsIContentPermissionType); + let dispatcher = this.getDispatcherForWindow( + aRequest.window ? aRequest.window.top : aRequest.element.ownerGlobal.top); + let promise = dispatcher.sendRequestForResult({ + type: "GeckoView:ContentPermission", + uri: aRequest.principal.URI.spec, + perm: perm.type, + access: perm.access !== "unused" ? perm.access : null, + }).then(granted => { + if (!granted) { + return false; + } + // Ask for app permission after asking for content permission. + if (perm.type === "geolocation") { + return this.getAppPermissions(dispatcher, [PERM_ACCESS_FINE_LOCATION]); + } + return true; + }).catch(error => { + Cu.reportError("Permission error: " + error); + return /* granted */ false; + }).then(granted => { + (granted ? aRequest.allow : aRequest.cancel)(); + // Manually release the target request here to facilitate garbage collection. 
+ aRequest = undefined; + }); + }, +}; + +this.NSGetFactory = XPCOMUtils.generateNSGetFactory([GeckoViewPermission]); diff --git a/mobile/android/components/geckoview/moz.build b/mobile/android/components/geckoview/moz.build index 316b3e99b0345..56fade2d7e5ed 100644 --- a/mobile/android/components/geckoview/moz.build +++ b/mobile/android/components/geckoview/moz.build @@ -6,5 +6,6 @@ EXTRA_COMPONENTS += [ 'GeckoView.manifest', + 'GeckoViewPermission.js', 'GeckoViewPrompt.js', ] diff --git a/mobile/android/installer/package-manifest.in b/mobile/android/installer/package-manifest.in index a132477041489..5e4ab54b2e14d 100644 --- a/mobile/android/installer/package-manifest.in +++ b/mobile/android/installer/package-manifest.in @@ -527,6 +527,7 @@ #ifdef MOZ_GECKOVIEW_JAR @BINPATH@/components/GeckoView.manifest @BINPATH@/components/GeckoViewPrompt.js +@BINPATH@/components/GeckoViewPermission.js #else @BINPATH@/chrome/chrome@JAREXT@ @BINPATH@/chrome/chrome.manifest From 44c21b3e03605aee81aec2ababeeb4851a371ab6 Mon Sep 17 00:00:00 2001 From: Jim Chen Date: Thu, 20 Jul 2017 17:52:14 -0400 Subject: [PATCH 047/152] Bug 1322586 - 4. Add and implement PermissionDelegate support; r=esawin Add PermissionDelegate interface and implement support for Android and content permissions (WebRTC permissions will be implemented later). 
MozReview-Commit-ID: 1B2DUjh8Ajw --HG-- extra : rebase_source : 7c0a4489b136100ca57740512436bb8cccc62f3a --- .../java/org/mozilla/gecko/GeckoView.java | 222 ++++++++++++++++++ 1 file changed, 222 insertions(+) diff --git a/mobile/android/geckoview/src/main/java/org/mozilla/gecko/GeckoView.java b/mobile/android/geckoview/src/main/java/org/mozilla/gecko/GeckoView.java index d90872b7f2785..96bceaa8a93a6 100644 --- a/mobile/android/geckoview/src/main/java/org/mozilla/gecko/GeckoView.java +++ b/mobile/android/geckoview/src/main/java/org/mozilla/gecko/GeckoView.java @@ -188,6 +188,109 @@ public void handleMessage(final ScrollListener listener, } }; + private final GeckoViewHandler mPermissionHandler = + new GeckoViewHandler( + "GeckoViewPermission", this, + new String[] { + "GeckoView:AndroidPermission", + "GeckoView:ContentPermission", + "GeckoView:MediaPermission" + }, /* alwaysListen */ true + ) { + @Override + public void handleMessage(final PermissionDelegate listener, + final String event, + final GeckoBundle message, + final EventCallback callback) { + + if (listener == null) { + callback.sendSuccess(/* granted */ false); + return; + } + if ("GeckoView:AndroidPermission".equals(event)) { + listener.requestAndroidPermissions( + GeckoView.this, message.getStringArray("perms"), + new PermissionCallback("android", callback)); + } else if ("GeckoView:ContentPermission".equals(event)) { + final String type = message.getString("perm"); + listener.requestContentPermission( + GeckoView.this, message.getString("uri"), + type, message.getString("access"), + new PermissionCallback(type, callback)); + } else if ("GeckoView:MediaPermission".equals(event)) { + listener.requestMediaPermission( + GeckoView.this, message.getString("uri"), + message.getBundleArray("video"), message.getBundleArray("audio"), + new PermissionCallback("media", callback)); + } + } + }; + + private static class PermissionCallback implements + PermissionDelegate.Callback, PermissionDelegate.MediaCallback { 
+ + private final String mType; + private EventCallback mCallback; + + public PermissionCallback(final String type, final EventCallback callback) { + mType = type; + mCallback = callback; + } + + private void submit(final Object response) { + if (mCallback != null) { + mCallback.sendSuccess(response); + mCallback = null; + } + } + + @Override // PermissionDelegate.Callback + public void grant() { + if ("media".equals(mType)) { + throw new UnsupportedOperationException(); + } + submit(/* response */ true); + } + + @Override // PermissionDelegate.Callback, PermissionDelegate.MediaCallback + public void reject() { + submit(/* response */ false); + } + + @Override // PermissionDelegate.MediaCallback + public void grant(final String video, final String audio) { + if (!"media".equals(mType)) { + throw new UnsupportedOperationException(); + } + final GeckoBundle response = new GeckoBundle(2); + response.putString("video", video); + response.putString("audio", audio); + submit(response); + } + + @Override // PermissionDelegate.MediaCallback + public void grant(final GeckoBundle video, final GeckoBundle audio) { + grant(video != null ? video.getString("id") : null, + audio != null ? audio.getString("id") : null); + } + } + + /** + * Get the current prompt delegate for this GeckoView. + * @return PromptDelegate instance or null if using default delegate. + */ + public PermissionDelegate getPermissionDelegate() { + return mPermissionHandler.getListener(); + } + + /** + * Set the current permission delegate for this GeckoView. + * @param delegate PermissionDelegate instance or null to use the default delegate. 
+ */ + public void setPermissionDelegate(final PermissionDelegate delegate) { + mPermissionHandler.setListener(delegate, this); + } + private PromptDelegate mPromptDelegate; private InputConnectionListener mInputConnectionListener; @@ -1577,4 +1680,123 @@ public interface ScrollListener { */ public void onScrollChanged(GeckoView view, int scrollX, int scrollY); } + + /** + * GeckoView applications implement this interface to handle requests for permissions + * from content, such as geolocation and notifications. For each permission, usually + * two requests are generated: one request for the Android app permission through + * requestAppPermissions, which is typically handled by a system permission dialog; + * and another request for the content permission (e.g. through + * requestContentPermission), which is typically handled by an app-specific + * permission dialog. + **/ + public interface PermissionDelegate { + /** + * Callback interface for notifying the result of a permission request. + */ + interface Callback { + /** + * Called by the implementation after permissions are granted; the + * implementation must call either grant() or reject() for every request. + */ + void grant(); + + /** + * Called by the implementation when permissions are not granted; the + * implementation must call either grant() or reject() for every request. + */ + void reject(); + } + + /** + * Request Android app permissions. + * + * @param view GeckoView instance requesting the permissions. + * @param permissions List of permissions to request; possible values are, + * android.Manifest.permission.ACCESS_FINE_LOCATION + * android.Manifest.permission.CAMERA + * android.Manifest.permission.RECORD_AUDIO + * @param callback Callback interface. + */ + void requestAndroidPermissions(GeckoView view, String[] permissions, + Callback callback); + + /** + * Request content permission. + * + * @param view GeckoView instance requesting the permission. 
+ * @param uri The URI of the content requesting the permission. + * @param type The type of the requested permission; possible values are, + * "geolocation": permission for using the geolocation API + * "desktop-notification": permission for using the notifications API + * @param access Not used. + * @param callback Callback interface. + */ + void requestContentPermission(GeckoView view, String uri, String type, + String access, Callback callback); + + /** + * Callback interface for notifying the result of a media permission request, + * including which media source(s) to use. + */ + interface MediaCallback { + /** + * Called by the implementation after permissions are granted; the + * implementation must call one of grant() or reject() for every request. + * + * @param video "id" value from the bundle for the video source to use, + * or null when video is not requested. + * @param audio "id" value from the bundle for the audio source to use, + * or null when audio is not requested. + */ + void grant(final String video, final String audio); + + /** + * Called by the implementation after permissions are granted; the + * implementation must call one of grant() or reject() for every request. + * + * @param video Bundle for the video source to use (must be an original + * GeckoBundle object that was passed to the implementation); + * or null when video is not requested. + * @param audio Bundle for the audio source to use (must be an original + * GeckoBundle object that was passed to the implementation); + * or null when audio is not requested. + */ + void grant(final GeckoBundle video, final GeckoBundle audio); + + /** + * Called by the implementation when permissions are not granted; the + * implementation must call one of grant() or reject() for every request. + */ + void reject(); + } + + /** + * Request content media permissions, including request for which video and/or + * audio source to use. + * + * @param view GeckoView instance requesting the permission. 
+ * @param uri The URI of the content requesting the permission. + * @param video List of video sources, or null if not requesting video. + * Each bundle represents a video source, with keys, + * "id": String, the origin-specific source identifier; + * "rawId": String, the non-origin-specific source identifier; + * "name": String, the name of the video source from the system + * (for example, "Camera 0, Facing back, Orientation 90"); + * may be empty; + * "mediaSource": String, the media source type; possible values are, + * "camera", "screen", "application", "window", + * "browser", and "other"; + * "type": String, always "video"; + * @param audio List of audio sources, or null if not requesting audio. + * Each bundle represents an audio source with same keys and possible + * values as video source bundles above, except for: + * "mediaSource", String; possible values are "microphone", + * "audioCapture", and "other"; + * "type", String, always "audio"; + * @param callback Callback interface. + */ + void requestMediaPermission(GeckoView view, String uri, GeckoBundle[] video, + GeckoBundle[] audio, MediaCallback callback); + } } From 37e37382dc6037516f6dc16dc202359ed5c097e7 Mon Sep 17 00:00:00 2001 From: Jim Chen Date: Thu, 20 Jul 2017 17:52:14 -0400 Subject: [PATCH 048/152] Bug 1322586 - 5. Implement PermissionDelegate for geckoview_example; r=droeh Add a sample implementation of PermissionDelegate for geckoview_example; Because the prompt code has some existing boilerplate, the actual prompts are implemented in BasicGeckoViewPrompt. 
MozReview-Commit-ID: EDfmRPn4cjR --HG-- extra : rebase_source : 3b4b770c606a9be3326bca3acd87770c5f8bb4fa --- .../BasicGeckoViewPrompt.java | 115 +++++++++++++++- .../geckoview_example/GeckoViewActivity.java | 126 ++++++++++++++++++ .../src/main/res/values/strings.xml | 9 ++ 3 files changed, 246 insertions(+), 4 deletions(-) diff --git a/mobile/android/geckoview_example/src/main/java/org/mozilla/geckoview_example/BasicGeckoViewPrompt.java b/mobile/android/geckoview_example/src/main/java/org/mozilla/geckoview_example/BasicGeckoViewPrompt.java index ff486cd7f597a..11082bb1a33b0 100644 --- a/mobile/android/geckoview_example/src/main/java/org/mozilla/geckoview_example/BasicGeckoViewPrompt.java +++ b/mobile/android/geckoview_example/src/main/java/org/mozilla/geckoview_example/BasicGeckoViewPrompt.java @@ -36,7 +36,7 @@ import android.widget.LinearLayout; import android.widget.ListView; import android.widget.ScrollView; -import android.widget.SeekBar; +import android.widget.Spinner; import android.widget.TextView; import android.widget.TimePicker; @@ -171,10 +171,11 @@ private LinearLayout addStandardLayout(final AlertDialog.Builder builder, final AlertCallback callback) { final ScrollView scrollView = new ScrollView(builder.getContext()); final LinearLayout container = new LinearLayout(builder.getContext()); - final int padding = getViewPadding(builder); + final int horizontalPadding = getViewPadding(builder); + final int verticalPadding = (msg == null || msg.isEmpty()) ? 
horizontalPadding : 0; container.setOrientation(LinearLayout.VERTICAL); - container.setPadding(/* left */ padding, /* top */ 0, - /* right */ padding, /* bottom */ 0); + container.setPadding(/* left */ horizontalPadding, /* top */ verticalPadding, + /* right */ horizontalPadding, /* bottom */ verticalPadding); scrollView.addView(container); builder.setTitle(title) .setMessage(msg) @@ -788,4 +789,110 @@ public void onFileCallbackResult(final int resultCode, final Intent data) { callback.confirm(uris.toArray(new Uri[uris.size()])); } } + + public void promptForPermission(final GeckoView view, final String title, + final GeckoView.PermissionDelegate.Callback callback) { + final Activity activity = getActivity(view); + if (activity == null) { + callback.reject(); + return; + } + final AlertDialog.Builder builder = new AlertDialog.Builder(activity); + builder.setTitle(title) + .setOnDismissListener(new DialogInterface.OnDismissListener() { + @Override + public void onDismiss(final DialogInterface dialog) { + callback.reject(); + } + }) + .setNegativeButton(android.R.string.cancel, /* onClickListener */ null) + .setPositiveButton(android.R.string.ok, new DialogInterface.OnClickListener() { + @Override + public void onClick(final DialogInterface dialog, final int which) { + callback.grant(); + } + }) + .show(); + } + + private Spinner addMediaSpinner(final Context context, final ViewGroup container, + final GeckoBundle[] sources) { + final ArrayAdapter adapter = new ArrayAdapter( + context, android.R.layout.simple_spinner_item) { + private View convertView(final int position, final View view) { + if (view != null) { + final GeckoBundle item = getItem(position); + ((TextView) view).setText(item.getString("name")); + } + return view; + } + + @Override + public View getView(final int position, View view, + final ViewGroup parent) { + return convertView(position, super.getView(position, view, parent)); + } + + @Override + public View getDropDownView(final int position, final 
View view, + final ViewGroup parent) { + return convertView(position, super.getDropDownView(position, view, parent)); + } + }; + adapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item); + adapter.addAll(sources); + + final Spinner spinner = new Spinner(context); + spinner.setAdapter(adapter); + spinner.setSelection(0); + container.addView(spinner); + return spinner; + } + + public void promptForMedia(final GeckoView view, final String title, + final GeckoBundle[] video, final GeckoBundle[] audio, + final GeckoView.PermissionDelegate.MediaCallback callback) { + final Activity activity = getActivity(view); + if (activity == null || (video == null && audio == null)) { + callback.reject(); + return; + } + final AlertDialog.Builder builder = new AlertDialog.Builder(activity); + final LinearLayout container = addStandardLayout(builder, title, /* msg */ null, + /* callback */ null); + + final Spinner videoSpinner; + if (video != null) { + videoSpinner = addMediaSpinner(builder.getContext(), container, video); + } else { + videoSpinner = null; + } + + final Spinner audioSpinner; + if (audio != null) { + audioSpinner = addMediaSpinner(builder.getContext(), container, audio); + } else { + audioSpinner = null; + } + + builder.setOnDismissListener(new DialogInterface.OnDismissListener() { + @Override + public void onDismiss(final DialogInterface dialog) { + callback.reject(); + } + }) + .setNegativeButton(android.R.string.cancel, /* listener */ null) + .setPositiveButton(android.R.string.ok, + new DialogInterface.OnClickListener() { + @Override + public void onClick(final DialogInterface dialog, final int which) { + final GeckoBundle video = (videoSpinner != null) + ? (GeckoBundle) videoSpinner.getSelectedItem() : null; + final GeckoBundle audio = (audioSpinner != null) + ? 
(GeckoBundle) audioSpinner.getSelectedItem() : null; + callback.grant(video, audio); + } + }) + .show(); + } } diff --git a/mobile/android/geckoview_example/src/main/java/org/mozilla/geckoview_example/GeckoViewActivity.java b/mobile/android/geckoview_example/src/main/java/org/mozilla/geckoview_example/GeckoViewActivity.java index a00782ff5dc4b..3a3bcc039ded5 100644 --- a/mobile/android/geckoview_example/src/main/java/org/mozilla/geckoview_example/GeckoViewActivity.java +++ b/mobile/android/geckoview_example/src/main/java/org/mozilla/geckoview_example/GeckoViewActivity.java @@ -7,13 +7,17 @@ import android.app.Activity; import android.content.Intent; +import android.content.pm.PackageManager; import android.net.Uri; +import android.os.Build; import android.os.Bundle; import android.os.SystemClock; import android.text.TextUtils; import android.util.Log; import android.view.WindowManager; +import java.util.Locale; + import org.mozilla.gecko.GeckoView; import org.mozilla.gecko.GeckoViewSettings; import org.mozilla.gecko.util.GeckoBundle; @@ -24,6 +28,7 @@ public class GeckoViewActivity extends Activity { private static final String USE_MULTIPROCESS_EXTRA = "use_multiprocess"; /* package */ static final int REQUEST_FILE_PICKER = 1; + private static final int REQUEST_PERMISSIONS = 2; private GeckoView mGeckoView; @@ -61,6 +66,10 @@ protected void onCreate(Bundle savedInstanceState) { prompt.filePickerRequestCode = REQUEST_FILE_PICKER; mGeckoView.setPromptDelegate(prompt); + final MyGeckoViewPermission permission = new MyGeckoViewPermission(); + permission.androidPermissionRequestCode = REQUEST_PERMISSIONS; + mGeckoView.setPermissionDelegate(permission); + loadFromIntent(getIntent()); } @@ -95,6 +104,19 @@ protected void onActivityResult(final int requestCode, final int resultCode, } } + @Override + public void onRequestPermissionsResult(final int requestCode, + final String[] permissions, + final int[] grantResults) { + if (requestCode == REQUEST_PERMISSIONS) { + final 
MyGeckoViewPermission permission = (MyGeckoViewPermission) + mGeckoView.getPermissionDelegate(); + permission.onRequestPermissionsResult(permissions, grantResults); + } else { + super.onRequestPermissionsResult(requestCode, permissions, grantResults); + } + } + private class MyGeckoViewContent implements GeckoView.ContentListener { @Override public void onTitleChange(GeckoView view, String title) { @@ -143,4 +165,108 @@ public void onSecurityChange(GeckoView view, int status, GeckoBundle identity) { Log.i(LOGTAG, "Security status changed to " + statusString); } } + + private class MyGeckoViewPermission implements GeckoView.PermissionDelegate { + + public int androidPermissionRequestCode = 1; + private Callback mCallback; + + public void onRequestPermissionsResult(final String[] permissions, + final int[] grantResults) { + if (mCallback == null) { + return; + } + + final Callback cb = mCallback; + mCallback = null; + for (final int result : grantResults) { + if (result != PackageManager.PERMISSION_GRANTED) { + // At least one permission was not granted. + cb.reject(); + return; + } + } + cb.grant(); + } + + @Override + public void requestAndroidPermissions(final GeckoView view, final String[] permissions, + final Callback callback) { + if (Build.VERSION.SDK_INT < 23) { + // requestPermissions was introduced in API 23. 
+ callback.grant(); + return; + } + mCallback = callback; + requestPermissions(permissions, androidPermissionRequestCode); + } + + @Override + public void requestContentPermission(final GeckoView view, final String uri, + final String type, final String access, + final Callback callback) { + final int resId; + if ("geolocation".equals(type)) { + resId = R.string.request_geolocation; + } else if ("desktop-notification".equals(type)) { + resId = R.string.request_notification; + } else { + Log.w(LOGTAG, "Unknown permission: " + type); + callback.reject(); + return; + } + + final String title = getString(resId, Uri.parse(uri).getAuthority()); + final BasicGeckoViewPrompt prompt = (BasicGeckoViewPrompt) + mGeckoView.getPromptDelegate(); + prompt.promptForPermission(view, title, callback); + } + + private void normalizeMediaName(final GeckoBundle[] sources) { + if (sources == null) { + return; + } + for (final GeckoBundle source : sources) { + final String mediaSource = source.getString("mediaSource"); + String name = source.getString("name"); + if ("camera".equals(mediaSource)) { + if (name.toLowerCase(Locale.ENGLISH).contains("front")) { + name = getString(R.string.media_front_camera); + } else { + name = getString(R.string.media_back_camera); + } + } else if (!name.isEmpty()) { + continue; + } else if ("microphone".equals(mediaSource)) { + name = getString(R.string.media_microphone); + } else { + name = getString(R.string.media_other); + } + source.putString("name", name); + } + } + + @Override + public void requestMediaPermission(final GeckoView view, final String uri, + final GeckoBundle[] video, + final GeckoBundle[] audio, + final MediaCallback callback) { + final String host = Uri.parse(uri).getAuthority(); + final String title; + if (audio == null) { + title = getString(R.string.request_video, host); + } else if (video == null) { + title = getString(R.string.request_audio, host); + } else { + title = getString(R.string.request_media, host); + } + + 
normalizeMediaName(video); + normalizeMediaName(audio); + + final BasicGeckoViewPrompt prompt = (BasicGeckoViewPrompt) + mGeckoView.getPromptDelegate(); + prompt.promptForMedia(view, title, video, audio, callback); + } + } } diff --git a/mobile/android/geckoview_example/src/main/res/values/strings.xml b/mobile/android/geckoview_example/src/main/res/values/strings.xml index 6835b71b7a9ff..08507b83fe0fd 100644 --- a/mobile/android/geckoview_example/src/main/res/values/strings.xml +++ b/mobile/android/geckoview_example/src/main/res/values/strings.xml @@ -3,4 +3,13 @@ Username Password Clear + Share location with "%1$s"? + Allow notifications for "%1$s"? + Share video with "%1$s" + Share audio with "%1$s" + Share video and audio with "%1$s" + Back camera + Front camera + Microphone + Unknown source From 88ded8d0627983dc86e3d78845b0710149f0312c Mon Sep 17 00:00:00 2001 From: Mike Hommey Date: Fri, 21 Jul 2017 06:55:06 +0900 Subject: [PATCH 049/152] Bug 1382849 - Add workspace cache to toolchain docker-worker jobs. 
r=dustin,gps --HG-- extra : rebase_source : 79cfd065ff767b78ec81e02afd021dceb6c89e52 --- taskcluster/ci/toolchain/kind.yml | 1 + taskcluster/taskgraph/transforms/build_attrs.py | 6 +++++- taskcluster/taskgraph/transforms/job/toolchain.py | 2 ++ 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/taskcluster/ci/toolchain/kind.yml b/taskcluster/ci/toolchain/kind.yml index 23f5290fb6fe0..143f22aec35b2 100644 --- a/taskcluster/ci/toolchain/kind.yml +++ b/taskcluster/ci/toolchain/kind.yml @@ -6,6 +6,7 @@ loader: taskgraph.loader.transform:loader transforms: - taskgraph.transforms.try_job:transforms + - taskgraph.transforms.build_attrs:transforms - taskgraph.transforms.toolchain:transforms - taskgraph.transforms.job:transforms - taskgraph.transforms.task:transforms diff --git a/taskcluster/taskgraph/transforms/build_attrs.py b/taskcluster/taskgraph/transforms/build_attrs.py index 56c0076148b01..4781231f259f6 100644 --- a/taskcluster/taskgraph/transforms/build_attrs.py +++ b/taskcluster/taskgraph/transforms/build_attrs.py @@ -17,7 +17,11 @@ def set_build_attributes(config, jobs): appropriately for that purpose. 
""" for job in jobs: - build_platform, build_type = job['name'].split('/') + if '/' in job['name']: + build_platform, build_type = job['name'].split('/') + else: + build_platform = job['name'] + build_type = 'opt' # pgo builds are represented as a different platform, type opt if build_type == 'pgo': diff --git a/taskcluster/taskgraph/transforms/job/toolchain.py b/taskcluster/taskgraph/transforms/job/toolchain.py index bc48c29ca2236..1bdfaff8d29d6 100644 --- a/taskcluster/taskgraph/transforms/job/toolchain.py +++ b/taskcluster/taskgraph/transforms/job/toolchain.py @@ -14,6 +14,7 @@ from taskgraph.transforms.job.common import ( docker_worker_add_tc_vcs_cache, docker_worker_add_gecko_vcs_env_vars, + docker_worker_add_workspace_cache, support_vcs_checkout, ) from taskgraph.util.hash import hash_paths @@ -86,6 +87,7 @@ def docker_worker_toolchain(config, job, taskdesc): 'type': 'directory', }) + docker_worker_add_workspace_cache(config, job, taskdesc) docker_worker_add_tc_vcs_cache(config, job, taskdesc) docker_worker_add_gecko_vcs_env_vars(config, job, taskdesc) support_vcs_checkout(config, job, taskdesc) From 29dfb8fda791e67dc460d0e194ff591403d30764 Mon Sep 17 00:00:00 2001 From: Andrew Swan Date: Wed, 19 Jul 2017 10:27:21 -0700 Subject: [PATCH 050/152] Bug 1371762 Part 1 Let cert shim code selectively identify some xpis as privileged r=rhelmer MozReview-Commit-ID: L628sUEyHcL --HG-- extra : rebase_source : 00d591f24f4f0486ebf5e01a60a00f5669f602f0 --- toolkit/mozapps/extensions/internal/AddonTestUtils.jsm | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/toolkit/mozapps/extensions/internal/AddonTestUtils.jsm b/toolkit/mozapps/extensions/internal/AddonTestUtils.jsm index 0f410c204a071..e9834973d8a92 100644 --- a/toolkit/mozapps/extensions/internal/AddonTestUtils.jsm +++ b/toolkit/mozapps/extensions/internal/AddonTestUtils.jsm @@ -504,7 +504,11 @@ var AddonTestUtils = { let fakeCert = {commonName: id}; if (this.usePrivilegedSignatures) { - 
fakeCert.organizationalUnit = "Mozilla Extensions"; + let privileged = typeof this.usePrivilegedSignatures == "function" ? + this.usePrivilegedSignatures(id) : this.usePrivilegedSignatures; + if (privileged) { + fakeCert.organizationalUnit = "Mozilla Extensions"; + } } return [callback, Cr.NS_OK, fakeCert]; From b315b2ec675579116fdd22159f7d3195341e74e9 Mon Sep 17 00:00:00 2001 From: Andrew Swan Date: Wed, 19 Jul 2017 10:28:23 -0700 Subject: [PATCH 051/152] Bug 1371762 Part 2 Treat webextension experiments as legacy r=rhelmer MozReview-Commit-ID: ATa0DXnV2au --HG-- extra : rebase_source : ccec3a98c62a2739831a4a3f8066cbb07b5c4bf0 --- .../extensions/internal/XPIProvider.jsm | 13 +- .../extensions/test/xpcshell/test_legacy.js | 131 ++++++++++++++++++ .../test/xpcshell/xpcshell-shared.ini | 1 + 3 files changed, 141 insertions(+), 4 deletions(-) create mode 100644 toolkit/mozapps/extensions/test/xpcshell/test_legacy.js diff --git a/toolkit/mozapps/extensions/internal/XPIProvider.jsm b/toolkit/mozapps/extensions/internal/XPIProvider.jsm index 33ec43f9f4699..029fb64909738 100644 --- a/toolkit/mozapps/extensions/internal/XPIProvider.jsm +++ b/toolkit/mozapps/extensions/internal/XPIProvider.jsm @@ -262,7 +262,12 @@ const SIGNED_TYPES = new Set([ "webextension-theme", ]); -const ALL_TYPES = new Set([ +const LEGACY_TYPES = new Set([ + "apiextension", + "extension", +]); + +const ALL_EXTERNAL_TYPES = new Set([ "dictionary", "extension", "experiment", @@ -835,8 +840,8 @@ function isUsableAddon(aAddon) { return false; } - if (!AddonSettings.ALLOW_LEGACY_EXTENSIONS && - aAddon.type == "extension" && !aAddon._installLocation.isSystem && + if (!AddonSettings.ALLOW_LEGACY_EXTENSIONS && LEGACY_TYPES.has(aAddon.type) && + !aAddon._installLocation.isSystem && aAddon.signedState !== AddonManager.SIGNEDSTATE_PRIVILEGED) { logger.warn(`disabling legacy extension ${aAddon.id}`); return false; @@ -3588,7 +3593,7 @@ this.XPIProvider = { */ getAddonsByTypes(aTypes, aCallback) { let typesToGet 
= getAllAliasesForTypes(aTypes); - if (typesToGet && !typesToGet.some(type => ALL_TYPES.has(type))) { + if (typesToGet && !typesToGet.some(type => ALL_EXTERNAL_TYPES.has(type))) { aCallback([]); return; } diff --git a/toolkit/mozapps/extensions/test/xpcshell/test_legacy.js b/toolkit/mozapps/extensions/test/xpcshell/test_legacy.js new file mode 100644 index 0000000000000..0190a5afc92b1 --- /dev/null +++ b/toolkit/mozapps/extensions/test/xpcshell/test_legacy.js @@ -0,0 +1,131 @@ + +const LEGACY_PREF = "extensions.legacy.enabled"; + +createAppInfo("xpcshell@tests.mozilla.org", "XPCShell", "1", "1"); +startupManager(); + +add_task(async function test_disable() { + let legacy = [ + { + id: "bootstrap@tests.mozilla.org", + name: "Bootstrap add-on", + version: "1.0", + bootstrap: true, + multiprocessCompatible: true, + }, + { + id: "apiexperiment@tests.mozilla.org", + name: "WebExtension Experiment", + version: "1.0", + type: 256, + }, + ]; + + let nonLegacy = [ + { + id: "webextension@tests.mozilla.org", + manifest: { + applications: {gecko: {id: "webextension@tests.mozilla.org"}}, + }, + }, + { + id: "privileged@tests.mozilla.org", + name: "Privileged Bootstrap add-on", + version: "1.0", + bootstrap: true, + multiprocessCompatible: true, + }, + { + id: "langpack@tests.mozilla.org", + name: "Test Langpack", + version: "1.0", + type: "8", + }, + { + id: "dictionary@tests.mozilla.org", + name: "Test Dictionary", + version: "1.0", + type: "64", + } + ]; + + function makeXPI(info) { + if (info.manifest) { + return createTempWebExtensionFile(info); + } + + return createTempXPIFile(Object.assign({}, info, { + targetApplications: [{ + id: "xpcshell@tests.mozilla.org", + minVersion: "1", + maxVersion: "1" + }], + })); + } + + AddonTestUtils.usePrivilegedSignatures = id => id.startsWith("privileged"); + + // Start out with legacy extensions disabled, installing non-legacy + // extensions should succeed. 
+ Services.prefs.setBoolPref(LEGACY_PREF, false); + let installs = await Promise.all(nonLegacy.map(info => { + let xpi = makeXPI(info); + return AddonManager.getInstallForFile(xpi); + })); + await promiseCompleteAllInstalls(installs); + for (let install of installs) { + do_check_eq(install.state, AddonManager.STATE_INSTALLED); + do_check_eq(install.error, 0); + } + let addons = await AddonManager.getAddonsByIDs(nonLegacy.map(a => a.id)); + for (let addon of addons) { + do_check_eq(addon.appDisabled, false); + } + + // And installing legacy extensions should fail + let legacyXPIs = legacy.map(makeXPI); + installs = await Promise.all(legacyXPIs.map(xpi => AddonManager.getInstallForFile(xpi))); + + // Yuck, the AddonInstall API is atrocious. Installs of incompatible + // extensions are detected when the install reaches the DOWNLOADED state + // and the install is abandoned at that point. Since this is a local file + // install we just start out in the DONWLOADED state. + for (let install of installs) { + do_check_eq(install.state, AddonManager.STATE_DOWNLOADED); + do_check_eq(install.addon.appDisabled, true); + } + + // Now enable legacy extensions, and we should be able to install + // the legacy extensions. + Services.prefs.setBoolPref(LEGACY_PREF, true); + installs = await Promise.all(legacyXPIs.map(xpi => AddonManager.getInstallForFile(xpi))); + for (let install of installs) { + do_check_eq(install.state, AddonManager.STATE_DOWNLOADED); + do_check_eq(install.addon.appDisabled, false); + } + await promiseCompleteAllInstalls(installs); + for (let install of installs) { + do_check_eq(install.state, AddonManager.STATE_INSTALLED); + do_check_eq(install.error, 0); + } + addons = await AddonManager.getAddonsByIDs(legacy.map(a => a.id)); + for (let addon of addons) { + do_check_eq(addon.appDisabled, false); + } + + // Flip the preference back, the legacy extensions should become disabled + // but non-legacy extensions should remain enabled. 
+ Services.prefs.setBoolPref(LEGACY_PREF, false); + addons = await AddonManager.getAddonsByIDs(nonLegacy.map(a => a.id)); + for (let addon of addons) { + do_check_eq(addon.appDisabled, false); + addon.uninstall(); + } + addons = await AddonManager.getAddonsByIDs(legacy.map(a => a.id)); + for (let addon of addons) { + do_check_eq(addon.appDisabled, true); + addon.uninstall(); + } + + Services.prefs.clearUserPref(LEGACY_PREF); +}); diff --git a/toolkit/mozapps/extensions/test/xpcshell/xpcshell-shared.ini b/toolkit/mozapps/extensions/test/xpcshell/xpcshell-shared.ini index 6dba4285591db..1fc93cef9736d 100644 --- a/toolkit/mozapps/extensions/test/xpcshell/xpcshell-shared.ini +++ b/toolkit/mozapps/extensions/test/xpcshell/xpcshell-shared.ini @@ -239,6 +239,7 @@ skip-if = os == "android" skip-if = os == "android" run-sequentially = Uses hardcoded ports in xpi files. [test_isDebuggable.js] +[test_legacy.js] [test_locale.js] [test_locked.js] [test_locked2.js] From 19dd7600d0b09a46451286ab6427719bd0e01687 Mon Sep 17 00:00:00 2001 From: Brian Stack Date: Thu, 13 Jul 2017 10:41:58 -0700 Subject: [PATCH 052/152] Bug 1380454 - Port retrigger to actions.json r=dustin MozReview-Commit-ID: 3942ptSNiss --HG-- extra : rebase_source : 631cbd346438d83a5af0035939b26fd678de9573 --- taskcluster/actions/registry.py | 9 ++-- taskcluster/actions/retrigger.py | 56 ++++++++++++++++++++++ taskcluster/docs/action-implementation.rst | 2 +- 3 files changed, 63 insertions(+), 4 deletions(-) create mode 100644 taskcluster/actions/retrigger.py diff --git a/taskcluster/actions/registry.py b/taskcluster/actions/registry.py index 2d62597f50e90..8b144e1fc54c3 100644 --- a/taskcluster/actions/registry.py +++ b/taskcluster/actions/registry.py @@ -28,7 +28,7 @@ def is_json(data): return True -def register_task_action(name, title, description, order, context, schema): +def register_task_action(name, title, description, order, context, schema=None): """ Register an action task that can be triggered from 
supporting user interfaces, such as Treeherder. @@ -268,7 +268,7 @@ def render_actions_json(parameters): task = action.task_template_builder(parameters) if task: assert is_json(task), 'task must be a JSON compatible object' - result.append({ + res = { 'kind': 'task', 'name': action.name, 'title': action.title, @@ -276,7 +276,10 @@ def render_actions_json(parameters): 'context': action.context, 'schema': action.schema, 'task': task, - }) + } + if res['schema'] is None: + res.pop('schema') + result.append(res) return { 'version': 1, 'variables': { diff --git a/taskcluster/actions/retrigger.py b/taskcluster/actions/retrigger.py new file mode 100644 index 0000000000000..d43ed88c16208 --- /dev/null +++ b/taskcluster/actions/retrigger.py @@ -0,0 +1,56 @@ +from registry import register_task_action + + +@register_task_action( + title='Retrigger', + name='retrigger', + description='Create a clone of the task', + order=1, + context=[{}], +) +def retrigger_task_builder(parameters): + + new_expires = '30 days' + + return { + '$merge': [ + {'$eval': 'task'}, + {'created': {'$fromNow': ''}}, + {'deadline': {'$fromNow': '1 day'}}, + {'expires': {'$fromNow': new_expires}}, + {'payload': { + '$merge': [ + {'$eval': 'task.payload'}, + { + '$if': '"artifacts" in task.payload', + 'then': { + 'artifacts': { + '$if': 'typeof(task.payload.artifacts) == "object"', + 'then': { + '$map': {'$eval': 'task.payload.artifacts'}, + 'each(artifact)': { + '${artifact.key}': { + '$merge': [ + {'$eval': 'artifact.val'}, + {'expires': {'$fromNow': new_expires}}, + ], + }, + }, + }, + 'else': { + '$map': {'$eval': 'task.payload.artifacts'}, + 'each(artifact)': { + '$merge': [ + {'$eval': 'artifact'}, + {'expires': {'$fromNow': new_expires}}, + ], + }, + }, + }, + }, + 'else': {}, + } + ] + }} + ] + } diff --git a/taskcluster/docs/action-implementation.rst b/taskcluster/docs/action-implementation.rst index 1448bca8f50b1..770024b2e83c3 100644 --- a/taskcluster/docs/action-implementation.rst +++ 
b/taskcluster/docs/action-implementation.rst @@ -210,6 +210,7 @@ the example below illustrates how to create such an action:: 'enum': ['low', 'normal', 'high'], 'default': 'low', }, + ) def task_template_builder(parameters): # The task template builder may return None to signal that the action # isn't available. @@ -233,7 +234,6 @@ the example below illustrates how to create such an action:: # additional metadata for treeherder in task.extra.treeherder. ... }, - ) This kind of action is useful for creating simple derivative tasks, but is limited by the expressiveness of the template language. On the other hand, it From 7bea00cd2b2b233782cdc6e750c27970f0a1fa1e Mon Sep 17 00:00:00 2001 From: Ursula Sarracini Date: Thu, 20 Jul 2017 16:59:59 -0400 Subject: [PATCH 053/152] Bug 1382785 - Add Pocket, search delay, and bug fixes to Activity Stream r=Mardak MozReview-Commit-ID: CQEN0Rzy6TX --HG-- extra : rebase_source : 010c160c5689634056ffc81f6efb5d65961e14b8 --- .../activity-stream/common/Actions.jsm | 7 + .../activity-stream/common/Reducers.jsm | 62 +- .../data/content/activity-stream.bundle.js | 1039 ++++++++++++++--- .../data/content/activity-stream.css | 247 +++- .../data/content/activity-stream.html | 4 + .../content/assets/glyph-historyItem-16.svg | 6 + .../content/assets/glyph-info-option-12.svg | 1 + .../data/content/assets/glyph-now-16.svg | 6 + .../data/content/assets/glyph-pocket-16.svg | 4 +- .../data/content/assets/glyph-trending-16.svg | 8 + .../content/assets/topic-show-more-12.svg | 12 + .../activity-stream/data/locales.json | 5 +- .../activity-stream/lib/ActivityStream.jsm | 54 +- .../activity-stream/lib/PlacesFeed.jsm | 5 + .../activity-stream/lib/SnippetsFeed.jsm | 58 + .../extensions/activity-stream/lib/Store.jsm | 2 + .../activity-stream/lib/SystemTickFeed.jsm | 35 + .../activity-stream/lib/TopStoriesFeed.jsm | 187 +++ .../test/functional/mochitest/browser.ini | 1 - .../activity-stream/test/mozinfo.json | 3 - .../test/unit/common/Reducers.test.js | 94 
+- .../test/unit/lib/ActivityStream.test.js | 24 +- .../test/unit/lib/PlacesFeed.test.js | 5 + .../test/unit/lib/SnippetsFeed.test.js | 60 + .../test/unit/lib/SystemTickFeed.test.js | 41 + .../test/unit/lib/TopStoriesFeed.test.js | 257 ++++ .../test/unit/lib/init-store.test.js | 10 + .../activity-stream/test/unit/unit-entry.js | 3 + 28 files changed, 2060 insertions(+), 180 deletions(-) create mode 100644 browser/extensions/activity-stream/data/content/assets/glyph-historyItem-16.svg create mode 100644 browser/extensions/activity-stream/data/content/assets/glyph-info-option-12.svg create mode 100644 browser/extensions/activity-stream/data/content/assets/glyph-now-16.svg create mode 100644 browser/extensions/activity-stream/data/content/assets/glyph-trending-16.svg create mode 100644 browser/extensions/activity-stream/data/content/assets/topic-show-more-12.svg create mode 100644 browser/extensions/activity-stream/lib/SnippetsFeed.jsm create mode 100644 browser/extensions/activity-stream/lib/SystemTickFeed.jsm create mode 100644 browser/extensions/activity-stream/lib/TopStoriesFeed.jsm delete mode 100644 browser/extensions/activity-stream/test/mozinfo.json create mode 100644 browser/extensions/activity-stream/test/unit/lib/SnippetsFeed.test.js create mode 100644 browser/extensions/activity-stream/test/unit/lib/SystemTickFeed.test.js create mode 100644 browser/extensions/activity-stream/test/unit/lib/TopStoriesFeed.test.js diff --git a/browser/extensions/activity-stream/common/Actions.jsm b/browser/extensions/activity-stream/common/Actions.jsm index ad9391e4d4b84..d05ec4c95d6dd 100644 --- a/browser/extensions/activity-stream/common/Actions.jsm +++ b/browser/extensions/activity-stream/common/Actions.jsm @@ -31,6 +31,7 @@ for (const type of [ "DELETE_HISTORY_URL_CONFIRM", "DIALOG_CANCEL", "DIALOG_OPEN", + "FEED_INIT", "INIT", "LOCALE_UPDATED", "NEW_TAB_INITIAL_STATE", @@ -50,7 +51,13 @@ for (const type of [ "PREF_CHANGED", "SAVE_TO_POCKET", "SCREENSHOT_UPDATED", + 
"SECTION_DEREGISTER", + "SECTION_REGISTER", + "SECTION_ROWS_UPDATE", "SET_PREF", + "SNIPPETS_DATA", + "SNIPPETS_RESET", + "SYSTEM_TICK", "TELEMETRY_PERFORMANCE_EVENT", "TELEMETRY_UNDESIRED_EVENT", "TELEMETRY_USER_EVENT", diff --git a/browser/extensions/activity-stream/common/Reducers.jsm b/browser/extensions/activity-stream/common/Reducers.jsm index b5c8cdd63cef8..5ec24cf47541c 100644 --- a/browser/extensions/activity-stream/common/Reducers.jsm +++ b/browser/extensions/activity-stream/common/Reducers.jsm @@ -16,6 +16,7 @@ const INITIAL_STATE = { // The version of the system-addon version: null }, + Snippets: {initialized: false}, TopSites: { // Have we received real data from history yet? initialized: false, @@ -29,7 +30,8 @@ const INITIAL_STATE = { Dialog: { visible: false, data: {} - } + }, + Sections: [] }; function App(prevState = INITIAL_STATE.App, action) { @@ -105,6 +107,9 @@ function TopSites(prevState = INITIAL_STATE.TopSites, action) { }); return hasMatch ? Object.assign({}, prevState, {rows: newRows}) : prevState; case at.PLACES_BOOKMARK_ADDED: + if (!action.data) { + return prevState; + } newRows = prevState.rows.map(site => { if (site && site.url === action.data.url) { const {bookmarkGuid, bookmarkTitle, lastModified} = action.data; @@ -114,6 +119,9 @@ function TopSites(prevState = INITIAL_STATE.TopSites, action) { }); return Object.assign({}, prevState, {rows: newRows}); case at.PLACES_BOOKMARK_REMOVED: + if (!action.data) { + return prevState; + } newRows = prevState.rows.map(site => { if (site && site.url === action.data.url) { const newSite = Object.assign({}, site); @@ -165,8 +173,58 @@ function Prefs(prevState = INITIAL_STATE.Prefs, action) { } } +function Sections(prevState = INITIAL_STATE.Sections, action) { + let hasMatch; + let newState; + switch (action.type) { + case at.SECTION_DEREGISTER: + return prevState.filter(section => section.id !== action.data); + case at.SECTION_REGISTER: + // If section exists in prevState, update it + newState = 
prevState.map(section => { + if (section && section.id === action.data.id) { + hasMatch = true; + return Object.assign({}, section, action.data); + } + return section; + }); + // If section doesn't exist in prevState, create a new section object and + // append it to the sections state + if (!hasMatch) { + const initialized = action.data.rows && action.data.rows.length > 0; + newState.push(Object.assign({title: "", initialized, rows: []}, action.data)); + } + return newState; + case at.SECTION_ROWS_UPDATE: + return prevState.map(section => { + if (section && section.id === action.data.id) { + return Object.assign({}, section, action.data); + } + return section; + }); + case at.PLACES_LINK_DELETED: + case at.PLACES_LINK_BLOCKED: + return prevState.map(section => + Object.assign({}, section, {rows: section.rows.filter(site => site.url !== action.data.url)})); + default: + return prevState; + } +} + +function Snippets(prevState = INITIAL_STATE.Snippets, action) { + switch (action.type) { + case at.SNIPPETS_DATA: + return Object.assign({}, prevState, {initialized: true}, action.data); + case at.SNIPPETS_RESET: + return INITIAL_STATE.Snippets; + default: + return prevState; + } +} + this.INITIAL_STATE = INITIAL_STATE; -this.reducers = {TopSites, App, Prefs, Dialog}; + +this.reducers = {TopSites, App, Snippets, Prefs, Dialog, Sections}; this.insertPinned = insertPinned; this.EXPORTED_SYMBOLS = ["reducers", "INITIAL_STATE", "insertPinned"]; diff --git a/browser/extensions/activity-stream/data/content/activity-stream.bundle.js b/browser/extensions/activity-stream/data/content/activity-stream.bundle.js index 1efb902d23f4f..47306715aace4 100644 --- a/browser/extensions/activity-stream/data/content/activity-stream.bundle.js +++ b/browser/extensions/activity-stream/data/content/activity-stream.bundle.js @@ -1,41 +1,41 @@ /******/ (function(modules) { // webpackBootstrap /******/ // The module cache /******/ var installedModules = {}; -/******/ + /******/ // The require 
function /******/ function __webpack_require__(moduleId) { -/******/ + /******/ // Check if module is in cache /******/ if(installedModules[moduleId]) /******/ return installedModules[moduleId].exports; -/******/ + /******/ // Create a new module (and put it into the cache) /******/ var module = installedModules[moduleId] = { /******/ i: moduleId, /******/ l: false, /******/ exports: {} /******/ }; -/******/ + /******/ // Execute the module function /******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__); -/******/ + /******/ // Flag the module as loaded /******/ module.l = true; -/******/ + /******/ // Return the exports of the module /******/ return module.exports; /******/ } -/******/ -/******/ + + /******/ // expose the modules object (__webpack_modules__) /******/ __webpack_require__.m = modules; -/******/ + /******/ // expose the module cache /******/ __webpack_require__.c = installedModules; -/******/ + /******/ // identity function for calling harmony imports with the correct context /******/ __webpack_require__.i = function(value) { return value; }; -/******/ + /******/ // define getter function for harmony exports /******/ __webpack_require__.d = function(exports, name, getter) { /******/ if(!__webpack_require__.o(exports, name)) { @@ -46,7 +46,7 @@ /******/ }); /******/ } /******/ }; -/******/ + /******/ // getDefaultExport function for compatibility with non-harmony modules /******/ __webpack_require__.n = function(module) { /******/ var getter = module && module.__esModule ? 
@@ -55,19 +55,25 @@ /******/ __webpack_require__.d(getter, 'a', getter); /******/ return getter; /******/ }; -/******/ + /******/ // Object.prototype.hasOwnProperty.call /******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); }; -/******/ + /******/ // __webpack_public_path__ /******/ __webpack_require__.p = ""; -/******/ + /******/ // Load entry module and return exports -/******/ return __webpack_require__(__webpack_require__.s = 19); +/******/ return __webpack_require__(__webpack_require__.s = 25); /******/ }) /************************************************************************/ /******/ ([ /* 0 */ +/***/ (function(module, exports) { + +module.exports = React; + +/***/ }), +/* 1 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; @@ -97,7 +103,7 @@ const globalImportContext = typeof Window === "undefined" ? BACKGROUND_PROCESS : // UNINIT: "UNINIT" // } const actionTypes = {}; -for (const type of ["BLOCK_URL", "BOOKMARK_URL", "DELETE_BOOKMARK_BY_ID", "DELETE_HISTORY_URL", "DELETE_HISTORY_URL_CONFIRM", "DIALOG_CANCEL", "DIALOG_OPEN", "INIT", "LOCALE_UPDATED", "NEW_TAB_INITIAL_STATE", "NEW_TAB_LOAD", "NEW_TAB_UNLOAD", "NEW_TAB_VISIBLE", "OPEN_NEW_WINDOW", "OPEN_PRIVATE_WINDOW", "PINNED_SITES_UPDATED", "PLACES_BOOKMARK_ADDED", "PLACES_BOOKMARK_CHANGED", "PLACES_BOOKMARK_REMOVED", "PLACES_HISTORY_CLEARED", "PLACES_LINK_BLOCKED", "PLACES_LINK_DELETED", "PREFS_INITIAL_VALUES", "PREF_CHANGED", "SAVE_TO_POCKET", "SCREENSHOT_UPDATED", "SET_PREF", "TELEMETRY_PERFORMANCE_EVENT", "TELEMETRY_UNDESIRED_EVENT", "TELEMETRY_USER_EVENT", "TOP_SITES_PIN", "TOP_SITES_UNPIN", "TOP_SITES_UPDATED", "UNINIT"]) { +for (const type of ["BLOCK_URL", "BOOKMARK_URL", "DELETE_BOOKMARK_BY_ID", "DELETE_HISTORY_URL", "DELETE_HISTORY_URL_CONFIRM", "DIALOG_CANCEL", "DIALOG_OPEN", "FEED_INIT", "INIT", "LOCALE_UPDATED", "NEW_TAB_INITIAL_STATE", "NEW_TAB_LOAD", "NEW_TAB_UNLOAD", "NEW_TAB_VISIBLE", 
"OPEN_NEW_WINDOW", "OPEN_PRIVATE_WINDOW", "PINNED_SITES_UPDATED", "PLACES_BOOKMARK_ADDED", "PLACES_BOOKMARK_CHANGED", "PLACES_BOOKMARK_REMOVED", "PLACES_HISTORY_CLEARED", "PLACES_LINK_BLOCKED", "PLACES_LINK_DELETED", "PREFS_INITIAL_VALUES", "PREF_CHANGED", "SAVE_TO_POCKET", "SCREENSHOT_UPDATED", "SECTION_DEREGISTER", "SECTION_REGISTER", "SECTION_ROWS_UPDATE", "SET_PREF", "SNIPPETS_DATA", "SNIPPETS_RESET", "SYSTEM_TICK", "TELEMETRY_PERFORMANCE_EVENT", "TELEMETRY_UNDESIRED_EVENT", "TELEMETRY_USER_EVENT", "TOP_SITES_PIN", "TOP_SITES_UNPIN", "TOP_SITES_UPDATED", "UNINIT"]) { actionTypes[type] = type; } @@ -275,23 +281,17 @@ module.exports = { CONTENT_MESSAGE_TYPE }; -/***/ }), -/* 1 */ -/***/ (function(module, exports) { - -module.exports = React; - /***/ }), /* 2 */ /***/ (function(module, exports) { -module.exports = ReactRedux; +module.exports = ReactIntl; /***/ }), /* 3 */ /***/ (function(module, exports) { -module.exports = ReactIntl; +module.exports = ReactRedux; /***/ }), /* 4 */ @@ -325,7 +325,8 @@ module.exports = function shortURL(link) { // Remove the eTLD (e.g., com, net) and the preceding period from the hostname const eTLDLength = (eTLD || "").length || hostname.match(/\.com$/) && 3; const eTLDExtra = eTLDLength > 0 ? -(eTLDLength + 1) : Infinity; - return hostname.slice(0, eTLDExtra).toLowerCase() || hostname; + // If URL and hostname are not present fallback to page title. 
+ return hostname.slice(0, eTLDExtra).toLowerCase() || hostname || link.title; }; /***/ }), @@ -335,21 +336,95 @@ module.exports = function shortURL(link) { "use strict"; -const React = __webpack_require__(1); +const React = __webpack_require__(0); var _require = __webpack_require__(2); +const injectIntl = _require.injectIntl; + +const ContextMenu = __webpack_require__(15); + +var _require2 = __webpack_require__(1); + +const ac = _require2.actionCreators; + +const linkMenuOptions = __webpack_require__(21); +const DEFAULT_SITE_MENU_OPTIONS = ["CheckPinTopSite", "Separator", "OpenInNewWindow", "OpenInPrivateWindow"]; + +class LinkMenu extends React.Component { + getOptions() { + const props = this.props; + const site = props.site, + index = props.index, + source = props.source; + + // Handle special case of default site + + const propOptions = !site.isDefault ? props.options : DEFAULT_SITE_MENU_OPTIONS; + + const options = propOptions.map(o => linkMenuOptions[o](site, index)).map(option => { + const action = option.action, + id = option.id, + type = option.type, + userEvent = option.userEvent; + + if (!type && id) { + option.label = props.intl.formatMessage(option); + option.onClick = () => { + props.dispatch(action); + if (userEvent) { + props.dispatch(ac.UserEvent({ + event: userEvent, + source, + action_position: index + })); + } + }; + } + return option; + }); + + // This is for accessibility to support making each item tabbable. + // We want to know which item is the first and which item + // is the last, so we can close the context menu accordingly. 
+ options[0].first = true; + options[options.length - 1].last = true; + return options; + } + render() { + return React.createElement(ContextMenu, { + visible: this.props.visible, + onUpdate: this.props.onUpdate, + options: this.getOptions() }); + } +} + +module.exports = injectIntl(LinkMenu); +module.exports._unconnected = LinkMenu; + +/***/ }), +/* 6 */ +/***/ (function(module, exports, __webpack_require__) { + +"use strict"; + + +const React = __webpack_require__(0); + +var _require = __webpack_require__(3); + const connect = _require.connect; -var _require2 = __webpack_require__(3); +var _require2 = __webpack_require__(2); const addLocaleData = _require2.addLocaleData, IntlProvider = _require2.IntlProvider; -const TopSites = __webpack_require__(15); -const Search = __webpack_require__(14); -const ConfirmDialog = __webpack_require__(10); -const PreferencesPane = __webpack_require__(13); +const TopSites = __webpack_require__(19); +const Search = __webpack_require__(17); +const ConfirmDialog = __webpack_require__(14); +const PreferencesPane = __webpack_require__(16); +const Sections = __webpack_require__(18); // Locales that should be displayed RTL const RTL_LIST = ["ar", "he", "fa", "ur"]; @@ -409,6 +484,7 @@ class Base extends React.Component { null, prefs.showSearch && React.createElement(Search, null), prefs.showTopSites && React.createElement(TopSites, null), + React.createElement(Sections, null), React.createElement(ConfirmDialog, null) ), React.createElement(PreferencesPane, null) @@ -420,17 +496,17 @@ class Base extends React.Component { module.exports = connect(state => ({ App: state.App, Prefs: state.Prefs }))(Base); /***/ }), -/* 6 */ +/* 7 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; -var _require = __webpack_require__(0); +var _require = __webpack_require__(1); const at = _require.actionTypes; -var _require2 = __webpack_require__(17); +var _require2 = __webpack_require__(22); const perfSvc = _require2.perfService; @@ -495,7 
+571,7 @@ module.exports = class DetectUserSessionStart { }; /***/ }), -/* 7 */ +/* 8 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; @@ -503,13 +579,13 @@ module.exports = class DetectUserSessionStart { /* eslint-env mozilla/frame-script */ -var _require = __webpack_require__(18); +var _require = __webpack_require__(24); const createStore = _require.createStore, combineReducers = _require.combineReducers, applyMiddleware = _require.applyMiddleware; -var _require2 = __webpack_require__(0); +var _require2 = __webpack_require__(1); const au = _require2.actionUtils; @@ -564,7 +640,12 @@ module.exports = function initStore(reducers) { const store = createStore(mergeStateReducer(combineReducers(reducers)), applyMiddleware(messageMiddleware)); addMessageListener(INCOMING_MESSAGE_NAME, msg => { - store.dispatch(msg.data); + try { + store.dispatch(msg.data); + } catch (ex) { + console.error("Content msg:", msg, "Dispatch error: ", ex); // eslint-disable-line no-console + dump(`Content msg: ${ JSON.stringify(msg) }\nDispatch error: ${ ex }\n${ ex.stack }`); + } }); return store; @@ -575,7 +656,274 @@ module.exports.OUTGOING_MESSAGE_NAME = OUTGOING_MESSAGE_NAME; module.exports.INCOMING_MESSAGE_NAME = INCOMING_MESSAGE_NAME; /***/ }), -/* 8 */ +/* 9 */ +/***/ (function(module, exports, __webpack_require__) { + +"use strict"; +/* WEBPACK VAR INJECTION */(function(global) { + +const DATABASE_NAME = "snippets_db"; +const DATABASE_VERSION = 1; +const SNIPPETS_OBJECTSTORE_NAME = "snippets"; +const SNIPPETS_UPDATE_INTERVAL_MS = 14400000; // 4 hours. + +/** + * SnippetsMap - A utility for cacheing values related to the snippet. It has + * the same interface as a Map, but is optionally backed by + * indexedDB for persistent storage. + * Call .connect() to open a database connection and restore any + * previously cached data, if necessary. 
+ * + */ +class SnippetsMap extends Map { + constructor() { + super(...arguments); + this._db = null; + } + + set(key, value) { + super.set(key, value); + return this._dbTransaction(db => db.put(value, key)); + } + + delete(key, value) { + super.delete(key); + return this._dbTransaction(db => db.delete(key)); + } + + clear() { + super.clear(); + return this._dbTransaction(db => db.clear()); + } + + /** + * connect - Attaches an indexedDB back-end to the Map so that any set values + * are also cached in a store. It also restores any existing values + * that are already stored in the indexedDB store. + * + * @return {type} description + */ + async connect() { + // Open the connection + const db = await this._openDB(); + + // Restore any existing values + await this._restoreFromDb(db); + + // Attach a reference to the db + this._db = db; + } + + /** + * _dbTransaction - Returns a db transaction wrapped with the given modifier + * function as a Promise. If the db has not been connected, + * it resolves immediately. + * + * @param {func} modifier A function to call with the transaction + * @return {obj} A Promise that resolves when the transaction has + * completed or errored + */ + _dbTransaction(modifier) { + if (!this._db) { + return Promise.resolve(); + } + return new Promise((resolve, reject) => { + const transaction = modifier(this._db.transaction(SNIPPETS_OBJECTSTORE_NAME, "readwrite").objectStore(SNIPPETS_OBJECTSTORE_NAME)); + transaction.onsuccess = event => resolve(); + + /* istanbul ignore next */ + transaction.onerror = event => reject(transaction.error); + }); + } + + _openDB() { + return new Promise((resolve, reject) => { + const openRequest = indexedDB.open(DATABASE_NAME, DATABASE_VERSION); + + /* istanbul ignore next */ + openRequest.onerror = event => { + // Try to delete the old database so that we can start this process over + // next time. 
+ indexedDB.deleteDatabase(DATABASE_NAME); + reject(event); + }; + + openRequest.onupgradeneeded = event => { + const db = event.target.result; + if (!db.objectStoreNames.contains(SNIPPETS_OBJECTSTORE_NAME)) { + db.createObjectStore(SNIPPETS_OBJECTSTORE_NAME); + } + }; + + openRequest.onsuccess = event => { + let db = event.target.result; + + /* istanbul ignore next */ + db.onerror = err => console.error(err); // eslint-disable-line no-console + /* istanbul ignore next */ + db.onversionchange = versionChangeEvent => versionChangeEvent.target.close(); + + resolve(db); + }; + }); + } + + _restoreFromDb(db) { + return new Promise((resolve, reject) => { + let cursorRequest; + try { + cursorRequest = db.transaction(SNIPPETS_OBJECTSTORE_NAME).objectStore(SNIPPETS_OBJECTSTORE_NAME).openCursor(); + } catch (err) { + // istanbul ignore next + reject(err); + // istanbul ignore next + return; + } + + /* istanbul ignore next */ + cursorRequest.onerror = event => reject(event); + + cursorRequest.onsuccess = event => { + let cursor = event.target.result; + // Populate the cache from the persistent storage. + if (cursor) { + this.set(cursor.key, cursor.value); + cursor.continue(); + } else { + // We are done. + resolve(); + } + }; + }); + } +} + +/** + * SnippetsProvider - Initializes a SnippetsMap and loads snippets from a + * remote location, or else default snippets if the remote + * snippets cannot be retrieved. + */ +class SnippetsProvider { + constructor() { + // Initialize the Snippets Map and attaches it to a global so that + // the snippet payload can interact with it. + global.gSnippetsMap = new SnippetsMap(); + } + + get snippetsMap() { + return global.gSnippetsMap; + } + + async _refreshSnippets() { + // Check if the cached version of of the snippets in snippetsMap. If it's too + // old, blow away the entire snippetsMap. 
+ const cachedVersion = this.snippetsMap.get("snippets-cached-version"); + if (cachedVersion !== this.version) { + this.snippetsMap.clear(); + } + + // Has enough time passed for us to require an update? + const lastUpdate = this.snippetsMap.get("snippets-last-update"); + const needsUpdate = !(lastUpdate >= 0) || Date.now() - lastUpdate > SNIPPETS_UPDATE_INTERVAL_MS; + + if (needsUpdate && this.snippetsURL) { + this.snippetsMap.set("snippets-last-update", Date.now()); + try { + // TODO: timeout? + const response = await fetch(this.snippetsURL); + if (response.status === 200) { + const payload = await response.text(); + + this.snippetsMap.set("snippets", payload); + this.snippetsMap.set("snippets-cached-version", this.version); + } + } catch (e) { + console.error(e); // eslint-disable-line no-console + } + } + } + + _showDefaultSnippets() { + // TODO + } + + _showRemoteSnippets() { + const snippetsEl = document.getElementById(this.elementId); + const containerEl = document.getElementById(this.containerElementId); + const payload = this.snippetsMap.get("snippets"); + + if (!snippetsEl) { + throw new Error(`No element was found with id '${ this.elementId }'.`); + } + + // This could happen if fetching failed + if (!payload) { + throw new Error("No remote snippets were found in gSnippetsMap."); + } + + // Note that injecting snippets can throw if they're invalid XML. + snippetsEl.innerHTML = payload; + + // Scripts injected by innerHTML are inactive, so we have to relocate them + // through DOM manipulation to activate their contents. 
+ for (const scriptEl of snippetsEl.getElementsByTagName("script")) { + const relocatedScript = document.createElement("script"); + relocatedScript.text = scriptEl.text; + scriptEl.parentNode.replaceChild(relocatedScript, scriptEl); + } + + // Unhide the container if everything went OK + if (containerEl) { + containerEl.style.display = "block"; + } + } + + /** + * init - Fetch the snippet payload and show snippets + * + * @param {obj} options + * @param {str} options.snippetsURL The URL from which we fetch snippets + * @param {int} options.version The current snippets version + * @param {str} options.elementId The id of the element of the snippets container + */ + async init(options) { + Object.assign(this, { + snippetsURL: "", + version: 0, + elementId: "snippets", + containerElementId: "snippets-container", + connect: true + }, options); + + // TODO: Requires enabling indexedDB on newtab + // Restore the snippets map from indexedDB + if (this.connect) { + try { + await this.snippetsMap.connect(); + } catch (e) { + console.error(e); // eslint-disable-line no-console + } + } + + // Refresh snippets, if enough time has passed. + await this._refreshSnippets(); + + // Try showing remote snippets, falling back to defaults if necessary. + try { + this._showRemoteSnippets(); + } catch (e) { + this._showDefaultSnippets(e); + } + } +} + +module.exports.SnippetsMap = SnippetsMap; +module.exports.SnippetsProvider = SnippetsProvider; +module.exports.SNIPPETS_UPDATE_INTERVAL_MS = SNIPPETS_UPDATE_INTERVAL_MS; +/* WEBPACK VAR INJECTION */}.call(exports, __webpack_require__(23))) + +/***/ }), +/* 10 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; @@ -584,7 +932,7 @@ module.exports.INCOMING_MESSAGE_NAME = INCOMING_MESSAGE_NAME; * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ -var _require = __webpack_require__(0); +var _require = __webpack_require__(1); const at = _require.actionTypes; @@ -600,6 +948,7 @@ const INITIAL_STATE = { // The version of the system-addon version: null }, + Snippets: { initialized: false }, TopSites: { // Have we received real data from history yet? initialized: false, @@ -613,7 +962,8 @@ const INITIAL_STATE = { Dialog: { visible: false, data: {} - } + }, + Sections: [] }; function App() { @@ -701,6 +1051,9 @@ function TopSites() { }); return hasMatch ? Object.assign({}, prevState, { rows: newRows }) : prevState; case at.PLACES_BOOKMARK_ADDED: + if (!action.data) { + return prevState; + } newRows = prevState.rows.map(site => { if (site && site.url === action.data.url) { var _action$data2 = action.data; @@ -714,6 +1067,9 @@ function TopSites() { }); return Object.assign({}, prevState, { rows: newRows }); case at.PLACES_BOOKMARK_REMOVED: + if (!action.data) { + return prevState; + } newRows = prevState.rows.map(site => { if (site && site.url === action.data.url) { const newSite = Object.assign({}, site); @@ -771,7 +1127,61 @@ function Prefs() { } } -var reducers = { TopSites, App, Prefs, Dialog }; +function Sections() { + let prevState = arguments.length > 0 && arguments[0] !== undefined ? 
arguments[0] : INITIAL_STATE.Sections; + let action = arguments[1]; + + let hasMatch; + let newState; + switch (action.type) { + case at.SECTION_DEREGISTER: + return prevState.filter(section => section.id !== action.data); + case at.SECTION_REGISTER: + // If section exists in prevState, update it + newState = prevState.map(section => { + if (section && section.id === action.data.id) { + hasMatch = true; + return Object.assign({}, section, action.data); + } + return section; + }); + // If section doesn't exist in prevState, create a new section object and + // append it to the sections state + if (!hasMatch) { + const initialized = action.data.rows && action.data.rows.length > 0; + newState.push(Object.assign({ title: "", initialized, rows: [] }, action.data)); + } + return newState; + case at.SECTION_ROWS_UPDATE: + return prevState.map(section => { + if (section && section.id === action.data.id) { + return Object.assign({}, section, action.data); + } + return section; + }); + case at.PLACES_LINK_DELETED: + case at.PLACES_LINK_BLOCKED: + return prevState.map(section => Object.assign({}, section, { rows: section.rows.filter(site => site.url !== action.data.url) })); + default: + return prevState; + } +} + +function Snippets() { + let prevState = arguments.length > 0 && arguments[0] !== undefined ? 
arguments[0] : INITIAL_STATE.Snippets; + let action = arguments[1]; + + switch (action.type) { + case at.SNIPPETS_DATA: + return Object.assign({}, prevState, { initialized: true }, action.data); + case at.SNIPPETS_RESET: + return INITIAL_STATE.Snippets; + default: + return prevState; + } +} + +var reducers = { TopSites, App, Snippets, Prefs, Dialog, Sections }; module.exports = { reducers, INITIAL_STATE, @@ -779,29 +1189,175 @@ module.exports = { }; /***/ }), -/* 9 */ +/* 11 */ /***/ (function(module, exports) { module.exports = ReactDOM; /***/ }), -/* 10 */ +/* 12 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; -const React = __webpack_require__(1); +const React = __webpack_require__(0); +const LinkMenu = __webpack_require__(5); +const shortURL = __webpack_require__(4); var _require = __webpack_require__(2); +const FormattedMessage = _require.FormattedMessage; + +const cardContextTypes = __webpack_require__(13); + +/** + * Card component. + * Cards are found within a Section component and contain information about a link such + * as preview image, page title, page description, and some context about if the page + * was visited, bookmarked, trending etc... + * Each Section can make an unordered list of Cards which will create one instane of + * this class. Each card will then get a context menu which reflects the actions that + * can be done on this Card. 
+ */ +class Card extends React.Component { + constructor(props) { + super(props); + this.state = { showContextMenu: false, activeCard: null }; + } + toggleContextMenu(event, index) { + this.setState({ showContextMenu: true, activeCard: index }); + } + render() { + var _props = this.props; + const index = _props.index, + link = _props.link, + dispatch = _props.dispatch, + contextMenuOptions = _props.contextMenuOptions; + + const isContextMenuOpen = this.state.showContextMenu && this.state.activeCard === index; + const hostname = shortURL(link); + var _cardContextTypes$lin = cardContextTypes[link.type]; + const icon = _cardContextTypes$lin.icon, + intlID = _cardContextTypes$lin.intlID; + + + return React.createElement( + "li", + { className: `card-outer${ isContextMenuOpen ? " active" : "" }` }, + React.createElement( + "a", + { href: link.url }, + React.createElement( + "div", + { className: "card" }, + link.image && React.createElement("div", { className: "card-preview-image", style: { backgroundImage: `url(${ link.image })` } }), + React.createElement( + "div", + { className: "card-details" }, + React.createElement( + "div", + { className: "card-host-name" }, + " ", + hostname, + " " + ), + React.createElement( + "div", + { className: `card-text${ link.image ? 
"" : " full-height" }` }, + React.createElement( + "h4", + { className: "card-title" }, + " ", + link.title, + " " + ), + React.createElement( + "p", + { className: "card-description" }, + " ", + link.description, + " " + ) + ), + React.createElement( + "div", + { className: "card-context" }, + React.createElement("span", { className: `card-context-icon icon icon-${ icon }` }), + React.createElement( + "div", + { className: "card-context-label" }, + React.createElement(FormattedMessage, { id: intlID, defaultMessage: "Visited" }) + ) + ) + ) + ) + ), + React.createElement( + "button", + { className: "context-menu-button", + onClick: e => { + e.preventDefault(); + this.toggleContextMenu(e, index); + } }, + React.createElement( + "span", + { className: "sr-only" }, + `Open context menu for ${ link.title }` + ) + ), + React.createElement(LinkMenu, { + dispatch: dispatch, + visible: isContextMenuOpen, + onUpdate: val => this.setState({ showContextMenu: val }), + index: index, + site: link, + options: link.context_menu_options || contextMenuOptions }) + ); + } +} +module.exports = Card; + +/***/ }), +/* 13 */ +/***/ (function(module, exports, __webpack_require__) { + +"use strict"; + + +module.exports = { + history: { + intlID: "type_label_visited", + icon: "historyItem" + }, + bookmark: { + intlID: "type_label_bookmarked", + icon: "bookmark" + }, + trending: { + intlID: "type_label_recommended", + icon: "trending" + } +}; + +/***/ }), +/* 14 */ +/***/ (function(module, exports, __webpack_require__) { + +"use strict"; + + +const React = __webpack_require__(0); + +var _require = __webpack_require__(3); + const connect = _require.connect; -var _require2 = __webpack_require__(3); +var _require2 = __webpack_require__(2); const FormattedMessage = _require2.FormattedMessage; -var _require3 = __webpack_require__(0); +var _require3 = __webpack_require__(1); const actionTypes = _require3.actionTypes, ac = _require3.actionCreators; @@ -904,13 +1460,13 @@ 
module.exports._unconnected = ConfirmDialog; module.exports.Dialog = ConfirmDialog; /***/ }), -/* 11 */ +/* 15 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; -const React = __webpack_require__(1); +const React = __webpack_require__(0); class ContextMenu extends React.Component { constructor(props) { @@ -974,7 +1530,7 @@ class ContextMenu extends React.Component { this.hideContext(); option.onClick(); } }, - option.icon && React.createElement("span", { className: `icon icon-spacer icon-${option.icon}` }), + option.icon && React.createElement("span", { className: `icon icon-spacer icon-${ option.icon }` }), option.label ) ); @@ -987,97 +1543,24 @@ class ContextMenu extends React.Component { module.exports = ContextMenu; /***/ }), -/* 12 */ +/* 16 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; -const React = __webpack_require__(1); +const React = __webpack_require__(0); var _require = __webpack_require__(3); -const injectIntl = _require.injectIntl; - -const ContextMenu = __webpack_require__(11); - -var _require2 = __webpack_require__(0); - -const ac = _require2.actionCreators; - -const linkMenuOptions = __webpack_require__(16); -const DEFAULT_SITE_MENU_OPTIONS = ["CheckPinTopSite", "Separator", "OpenInNewWindow", "OpenInPrivateWindow"]; - -class LinkMenu extends React.Component { - getOptions() { - const props = this.props; - const site = props.site, - index = props.index, - source = props.source; - - // Handle special case of default site - - const propOptions = !site.isDefault ? 
props.options : DEFAULT_SITE_MENU_OPTIONS; - - const options = propOptions.map(o => linkMenuOptions[o](site, index)).map(option => { - const action = option.action, - id = option.id, - type = option.type, - userEvent = option.userEvent; - - if (!type && id) { - option.label = props.intl.formatMessage(option); - option.onClick = () => { - props.dispatch(action); - if (userEvent) { - props.dispatch(ac.UserEvent({ - event: userEvent, - source, - action_position: index - })); - } - }; - } - return option; - }); - - // This is for accessibility to support making each item tabbable. - // We want to know which item is the first and which item - // is the last, so we can close the context menu accordingly. - options[0].first = true; - options[options.length - 1].last = true; - return options; - } - render() { - return React.createElement(ContextMenu, { - visible: this.props.visible, - onUpdate: this.props.onUpdate, - options: this.getOptions() }); - } -} - -module.exports = injectIntl(LinkMenu); -module.exports._unconnected = LinkMenu; - -/***/ }), -/* 13 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - - -const React = __webpack_require__(1); - -var _require = __webpack_require__(2); - const connect = _require.connect; -var _require2 = __webpack_require__(3); +var _require2 = __webpack_require__(2); const injectIntl = _require2.injectIntl, FormattedMessage = _require2.FormattedMessage; -var _require3 = __webpack_require__(0); +var _require3 = __webpack_require__(1); const ac = _require3.actionCreators; @@ -1138,7 +1621,7 @@ class PreferencesPane extends React.Component { "div", { className: "prefs-pane-button" }, React.createElement("button", { - className: `prefs-button icon ${isVisible ? "icon-dismiss" : "icon-settings"}`, + className: `prefs-button icon ${ isVisible ? "icon-dismiss" : "icon-settings" }`, title: props.intl.formatMessage({ id: isVisible ? 
"settings_pane_done_button" : "settings_pane_button_label" }), onClick: this.togglePane }) ), @@ -1147,7 +1630,7 @@ class PreferencesPane extends React.Component { { className: "prefs-pane" }, React.createElement( "div", - { className: `sidebar ${isVisible ? "" : "hidden"}` }, + { className: `sidebar ${ isVisible ? "" : "hidden" }` }, React.createElement( "div", { className: "prefs-modal-inner-wrapper" }, @@ -1164,7 +1647,9 @@ class PreferencesPane extends React.Component { React.createElement(PreferencesInput, { className: "showSearch", prefName: "showSearch", value: prefs.showSearch, onChange: this.handleChange, titleStringId: "settings_pane_search_header", descStringId: "settings_pane_search_body" }), React.createElement(PreferencesInput, { className: "showTopSites", prefName: "showTopSites", value: prefs.showTopSites, onChange: this.handleChange, - titleStringId: "settings_pane_topsites_header", descStringId: "settings_pane_topsites_body" }) + titleStringId: "settings_pane_topsites_header", descStringId: "settings_pane_topsites_body" }), + React.createElement(PreferencesInput, { className: "showTopStories", prefName: "feeds.section.topstories", value: prefs["feeds.section.topstories"], onChange: this.handleChange, + titleStringId: "settings_pane_pocketstories_header", descStringId: "settings_pane_pocketstories_body" }) ), React.createElement( "section", @@ -1186,25 +1671,25 @@ module.exports.PreferencesPane = PreferencesPane; module.exports.PreferencesInput = PreferencesInput; /***/ }), -/* 14 */ +/* 17 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; /* globals ContentSearchUIController */ -const React = __webpack_require__(1); +const React = __webpack_require__(0); -var _require = __webpack_require__(2); +var _require = __webpack_require__(3); const connect = _require.connect; -var _require2 = __webpack_require__(3); +var _require2 = __webpack_require__(2); const FormattedMessage = _require2.FormattedMessage, injectIntl = 
_require2.injectIntl; -var _require3 = __webpack_require__(0); +var _require3 = __webpack_require__(1); const ac = _require3.actionCreators; @@ -1285,26 +1770,146 @@ module.exports = connect()(injectIntl(Search)); module.exports._unconnected = Search; /***/ }), -/* 15 */ +/* 18 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; -const React = __webpack_require__(1); +var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; -var _require = __webpack_require__(2); +const React = __webpack_require__(0); + +var _require = __webpack_require__(3); const connect = _require.connect; -var _require2 = __webpack_require__(3); +var _require2 = __webpack_require__(2); + +const FormattedMessage = _require2.FormattedMessage; + +const Card = __webpack_require__(12); +const Topics = __webpack_require__(20); + +class Section extends React.Component { + render() { + var _props = this.props; + const id = _props.id, + title = _props.title, + icon = _props.icon, + rows = _props.rows, + infoOption = _props.infoOption, + emptyState = _props.emptyState, + dispatch = _props.dispatch, + maxCards = _props.maxCards, + contextMenuOptions = _props.contextMenuOptions; + + const initialized = rows && rows.length > 0; + const shouldShowTopics = id === "TopStories" && this.props.topics && this.props.read_more_endpoint; + //
<-- React component + //
<-- HTML5 element + return React.createElement( + "section", + null, + React.createElement( + "div", + { className: "section-top-bar" }, + React.createElement( + "h3", + { className: "section-title" }, + React.createElement("span", { className: `icon icon-small-spacer icon-${ icon }` }), + React.createElement(FormattedMessage, title) + ), + infoOption && React.createElement( + "span", + { className: "section-info-option" }, + React.createElement( + "span", + { className: "sr-only" }, + React.createElement(FormattedMessage, { id: "section_info_option" }) + ), + React.createElement("img", { className: "info-option-icon" }), + React.createElement( + "div", + { className: "info-option" }, + infoOption.header && React.createElement( + "div", + { className: "info-option-header" }, + React.createElement(FormattedMessage, infoOption.header) + ), + infoOption.body && React.createElement( + "p", + { className: "info-option-body" }, + React.createElement(FormattedMessage, infoOption.body) + ), + infoOption.link && React.createElement( + "a", + { href: infoOption.link.href, target: "_blank", rel: "noopener noreferrer", className: "info-option-link" }, + React.createElement(FormattedMessage, infoOption.link) + ) + ) + ) + ), + React.createElement( + "ul", + { className: "section-list", style: { padding: 0 } }, + rows.slice(0, maxCards).map((link, index) => link && React.createElement(Card, { index: index, dispatch: dispatch, link: link, contextMenuOptions: contextMenuOptions })) + ), + !initialized && React.createElement( + "div", + { className: "section-empty-state" }, + React.createElement( + "div", + { className: "empty-state" }, + React.createElement("img", { className: `empty-state-icon icon icon-${ emptyState.icon }` }), + React.createElement( + "p", + { className: "empty-state-message" }, + React.createElement(FormattedMessage, emptyState.message) + ) + ) + ), + shouldShowTopics && React.createElement(Topics, { topics: this.props.topics, read_more_endpoint: 
this.props.read_more_endpoint }) + ); + } +} + +class Sections extends React.Component { + render() { + const sections = this.props.Sections; + return React.createElement( + "div", + { className: "sections-list" }, + sections.map(section => React.createElement(Section, _extends({ key: section.id }, section, { dispatch: this.props.dispatch }))) + ); + } +} + +module.exports = connect(state => ({ Sections: state.Sections }))(Sections); +module.exports._unconnected = Sections; +module.exports.Section = Section; + +/***/ }), +/* 19 */ +/***/ (function(module, exports, __webpack_require__) { + +"use strict"; + + +const React = __webpack_require__(0); + +var _require = __webpack_require__(3); + +const connect = _require.connect; + +var _require2 = __webpack_require__(2); const FormattedMessage = _require2.FormattedMessage; const shortURL = __webpack_require__(4); -const LinkMenu = __webpack_require__(12); +const LinkMenu = __webpack_require__(5); -var _require3 = __webpack_require__(0); +var _require3 = __webpack_require__(1); const ac = _require3.actionCreators; @@ -1334,12 +1939,12 @@ class TopSite extends React.Component { const isContextMenuOpen = this.state.showContextMenu && this.state.activeTile === index; const title = link.pinTitle || shortURL(link); - const screenshotClassName = `screenshot${link.screenshot ? " active" : ""}`; - const topSiteOuterClassName = `top-site-outer${isContextMenuOpen ? " active" : ""}`; - const style = { backgroundImage: link.screenshot ? `url(${link.screenshot})` : "none" }; + const screenshotClassName = `screenshot${ link.screenshot ? " active" : "" }`; + const topSiteOuterClassName = `top-site-outer${ isContextMenuOpen ? " active" : "" }`; + const style = { backgroundImage: link.screenshot ? 
`url(${ link.screenshot })` : "none" }; return React.createElement( "li", - { className: topSiteOuterClassName, key: link.url }, + { className: topSiteOuterClassName, key: link.guid || link.url }, React.createElement( "a", { onClick: () => this.trackClick(), href: link.url }, @@ -1355,7 +1960,7 @@ class TopSite extends React.Component { ), React.createElement( "div", - { className: `title ${link.isPinned ? "pinned" : ""}` }, + { className: `title ${ link.isPinned ? "pinned" : "" }` }, link.isPinned && React.createElement("div", { className: "icon icon-pin-small" }), React.createElement( "span", @@ -1374,7 +1979,7 @@ class TopSite extends React.Component { React.createElement( "span", { className: "sr-only" }, - `Open context menu for ${title}` + `Open context menu for ${ title }` ) ), React.createElement(LinkMenu, { @@ -1401,7 +2006,7 @@ const TopSites = props => React.createElement( "ul", { className: "top-sites-list" }, props.TopSites.rows.map((link, index) => link && React.createElement(TopSite, { - key: link.url, + key: link.guid || link.url, dispatch: props.dispatch, link: link, index: index })) @@ -1413,13 +2018,78 @@ module.exports._unconnected = TopSites; module.exports.TopSite = TopSite; /***/ }), -/* 16 */ +/* 20 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; -var _require = __webpack_require__(0); +const React = __webpack_require__(0); + +var _require = __webpack_require__(2); + +const FormattedMessage = _require.FormattedMessage; + + +class Topic extends React.Component { + render() { + var _props = this.props; + const url = _props.url, + name = _props.name; + + return React.createElement( + "li", + null, + React.createElement( + "a", + { key: name, className: "topic-link", href: url }, + name + ) + ); + } +} + +class Topics extends React.Component { + render() { + var _props2 = this.props; + const topics = _props2.topics, + read_more_endpoint = _props2.read_more_endpoint; + + return React.createElement( + "div", + { className: 
"topic" }, + React.createElement( + "span", + null, + React.createElement(FormattedMessage, { id: "pocket_read_more" }) + ), + React.createElement( + "ul", + null, + topics.map(t => React.createElement(Topic, { key: t.name, url: t.url, name: t.name })) + ), + React.createElement( + "a", + { className: "topic-read-more", href: read_more_endpoint }, + React.createElement(FormattedMessage, { id: "pocket_read_even_more" }), + React.createElement("span", { className: "topic-read-more-logo" }) + ) + ); + } +} + +module.exports = Topics; +module.exports._unconnected = Topics; +module.exports.Topic = Topic; + +/***/ }), +/* 21 */ +/***/ (function(module, exports, __webpack_require__) { + +"use strict"; + + +var _require = __webpack_require__(1); const at = _require.actionTypes, ac = _require.actionCreators; @@ -1524,7 +2194,7 @@ module.exports.CheckBookmark = site => site.bookmarkGuid ? module.exports.Remove module.exports.CheckPinTopSite = (site, index) => site.isPinned ? module.exports.UnpinTopSite(site) : module.exports.PinTopSite(site, index); /***/ }), -/* 17 */ +/* 22 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; @@ -1614,7 +2284,7 @@ _PerfService.prototype = { let entries = this.getEntriesByName(name, "mark"); if (!entries.length) { - throw new Error(`No marks with the name ${name}`); + throw new Error(`No marks with the name ${ name }`); } let mostRecentEntry = entries[entries.length - 1]; @@ -1629,33 +2299,65 @@ module.exports = { }; /***/ }), -/* 18 */ +/* 23 */ +/***/ (function(module, exports) { + +var g; + +// This works in non-strict mode +g = (function() { + return this; +})(); + +try { + // This works if eval is allowed (see CSP) + g = g || Function("return this")() || (1,eval)("this"); +} catch(e) { + // This works if the window reference is available + if(typeof window === "object") + g = window; +} + +// g can still be undefined, but nothing to do about it... 
+// We return undefined, instead of nothing here, so it's +// easier to handle this case. if(!global) { ...} + +module.exports = g; + + +/***/ }), +/* 24 */ /***/ (function(module, exports) { module.exports = Redux; /***/ }), -/* 19 */ +/* 25 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; -const React = __webpack_require__(1); -const ReactDOM = __webpack_require__(9); -const Base = __webpack_require__(5); +const React = __webpack_require__(0); +const ReactDOM = __webpack_require__(11); +const Base = __webpack_require__(6); -var _require = __webpack_require__(2); +var _require = __webpack_require__(3); const Provider = _require.Provider; -const initStore = __webpack_require__(7); +const initStore = __webpack_require__(8); -var _require2 = __webpack_require__(8); +var _require2 = __webpack_require__(10); const reducers = _require2.reducers; -const DetectUserSessionStart = __webpack_require__(6); +const DetectUserSessionStart = __webpack_require__(7); + +var _require3 = __webpack_require__(9); + +const SnippetsProvider = _require3.SnippetsProvider; + new DetectUserSessionStart().sendEventOrAddListener(); @@ -1667,5 +2369,18 @@ ReactDOM.render(React.createElement( React.createElement(Base, null) ), document.getElementById("root")); +// Trigger snippets when snippets data has been received. 
+const snippets = new SnippetsProvider(); +const unsubscribe = store.subscribe(() => { + const state = store.getState(); + if (state.Snippets.initialized) { + snippets.init({ + snippetsURL: state.Snippets.snippetsURL, + version: state.Snippets.version + }); + unsubscribe(); + } +}); + /***/ }) /******/ ]); \ No newline at end of file diff --git a/browser/extensions/activity-stream/data/content/activity-stream.css b/browser/extensions/activity-stream/data/content/activity-stream.css index 0bea565c57e26..320ae633d6a9a 100644 --- a/browser/extensions/activity-stream/data/content/activity-stream.css +++ b/browser/extensions/activity-stream/data/content/activity-stream.css @@ -1,3 +1,4 @@ +@charset "UTF-8"; html { box-sizing: border-box; } @@ -30,6 +31,8 @@ input { vertical-align: middle; } .icon.icon-spacer { margin-inline-end: 8px; } + .icon.icon-small-spacer { + margin-inline-end: 6px; } .icon.icon-bookmark { background-image: url("assets/glyph-bookmark-16.svg"); } .icon.icon-bookmark-remove { @@ -50,11 +53,19 @@ input { background-image: url("assets/glyph-unpin-16.svg"); } .icon.icon-pocket { background-image: url("assets/glyph-pocket-16.svg"); } + .icon.icon-historyItem { + background-image: url("assets/glyph-historyItem-16.svg"); } + .icon.icon-trending { + background-image: url("assets/glyph-trending-16.svg"); } + .icon.icon-now { + background-image: url("assets/glyph-now-16.svg"); } .icon.icon-pin-small { background-image: url("assets/glyph-pin-12.svg"); background-size: 12px; height: 12px; width: 12px; } + .icon.icon-check { + background-image: url("chrome://browser/skin/check.svg"); } html, body, @@ -134,6 +145,19 @@ a { color: #FFF; margin-inline-start: auto; } +#snippets-container { + display: none; + position: fixed; + bottom: 0; + left: 0; + right: 0; + background: white; + height: 122px; } + +#snippets { + max-width: 736px; + margin: 0 auto; } + .outer-wrapper { display: flex; flex-grow: 1; @@ -149,7 +173,7 @@ main { main { width: 736px; } } main section 
{ - margin-bottom: 41px; } + margin-bottom: 40px; } .section-title { color: #6E707E; @@ -205,10 +229,10 @@ main { .top-sites-list .top-site-outer .context-menu-button:focus, .top-sites-list .top-site-outer .context-menu-button:active { transform: scale(1); opacity: 1; } - .top-sites-list .top-site-outer:hover .tile, .top-sites-list .top-site-outer:active .tile, .top-sites-list .top-site-outer:focus .tile, .top-sites-list .top-site-outer.active .tile { + .top-sites-list .top-site-outer:hover .tile, .top-sites-list .top-site-outer:focus .tile, .top-sites-list .top-site-outer.active .tile { box-shadow: inset 0 0 0 1px rgba(0, 0, 0, 0.1), 0 0 0 5px rgba(0, 0, 0, 0.1); transition: box-shadow 150ms; } - .top-sites-list .top-site-outer:hover .context-menu-button, .top-sites-list .top-site-outer:active .context-menu-button, .top-sites-list .top-site-outer:focus .context-menu-button, .top-sites-list .top-site-outer.active .context-menu-button { + .top-sites-list .top-site-outer:hover .context-menu-button, .top-sites-list .top-site-outer:focus .context-menu-button, .top-sites-list .top-site-outer.active .context-menu-button { transform: scale(1); opacity: 1; } .top-sites-list .top-site-outer .tile { @@ -258,6 +282,117 @@ main { .top-sites-list .top-site-outer .title.pinned span { padding: 0 13px; } +.sections-list .section-top-bar { + position: relative; + height: 16px; + margin-bottom: 18px; } + .sections-list .section-top-bar .section-title { + float: left; } + .sections-list .section-top-bar .section-info-option { + float: right; } + .sections-list .section-top-bar .info-option-icon { + background-image: url("assets/glyph-info-option-12.svg"); + background-size: 12px 12px; + background-repeat: no-repeat; + background-position: center; + height: 16px; + width: 16px; + display: inline-block; } + .sections-list .section-top-bar .section-info-option div { + visibility: hidden; + opacity: 0; + transition: visibility 0.2s, opacity 0.2s ease-out; + transition-delay: 0.5s; } + 
.sections-list .section-top-bar .section-info-option:hover div { + visibility: visible; + opacity: 1; + transition: visibility 0.2s, opacity 0.2s ease-out; } + .sections-list .section-top-bar .info-option { + z-index: 9999; + position: absolute; + background: #FFF; + border: solid 1px rgba(0, 0, 0, 0.1); + border-radius: 3px; + font-size: 13px; + color: #0C0C0D; + line-height: 120%; + width: 320px; + right: 0; + top: 34px; + margin-top: -4px; + margin-right: -4px; + padding: 24px; + -moz-user-select: none; } + .sections-list .section-top-bar .info-option-header { + font-size: 15px; + font-weight: 600; } + .sections-list .section-top-bar .info-option-body { + margin: 0; + margin-top: 12px; } + .sections-list .section-top-bar .info-option-link { + display: block; + margin-top: 12px; + color: #0A84FF; } + +.sections-list .section-list { + width: 768px; + clear: both; + margin: 0; } + +.sections-list .section-empty-state { + width: 100%; + height: 266px; + display: flex; + border: solid 1px rgba(0, 0, 0, 0.1); + border-radius: 3px; } + .sections-list .section-empty-state .empty-state { + margin: auto; + max-width: 350px; } + .sections-list .section-empty-state .empty-state .empty-state-icon { + background-size: 50px 50px; + background-repeat: no-repeat; + background-position: center; + fill: rgba(160, 160, 160, 0.4); + -moz-context-properties: fill; + height: 50px; + width: 50px; + margin: 0 auto; + display: block; } + .sections-list .section-empty-state .empty-state .empty-state-message { + margin-bottom: 0; + font-size: 13px; + font-weight: 300; + color: #A0A0A0; + text-align: center; } + +.topic { + font-size: 13px; + color: #BFC0C7; + min-width: 780px; } + .topic ul { + display: inline; + padding-left: 12px; } + .topic ul li { + display: inline; } + .topic ul li::after { + content: '•'; + padding-left: 8px; + padding-right: 8px; } + .topic ul li:last-child::after { + content: none; } + .topic .topic-link { + color: #008EA4; } + .topic .topic-read-more { + float: 
right; + margin-right: 40px; + color: #008EA4; } + .topic .topic-read-more-logo { + padding-right: 10px; + margin-left: 5px; + background-image: url("assets/topic-show-more-12.svg"); + background-repeat: no-repeat; + background-position-y: 2px; } + .search-wrapper { cursor: default; display: flex; @@ -516,3 +651,109 @@ main { border-radius: 3px; font-size: 14px; z-index: 11002; } + +.card-outer { + background: #FFF; + display: inline-block; + margin-inline-end: 32px; + margin-bottom: 16px; + width: 224px; + border-radius: 3px; + border-color: rgba(0, 0, 0, 0.1); + height: 266px; + position: relative; } + .card-outer .context-menu-button { + cursor: pointer; + position: absolute; + top: -13.5px; + offset-inline-end: -13.5px; + width: 27px; + height: 27px; + background-color: #FFF; + background-image: url("assets/glyph-more-16.svg"); + background-position: 65%; + background-repeat: no-repeat; + background-clip: padding-box; + border: 1px solid rgba(0, 0, 0, 0.2); + border-radius: 100%; + box-shadow: 0 2px 0 rgba(0, 0, 0, 0.1); + transform: scale(0.25); + opacity: 0; + transition-property: transform, opacity; + transition-duration: 200ms; + z-index: 399; } + .card-outer .context-menu-button:focus, .card-outer .context-menu-button:active { + transform: scale(1); + opacity: 1; } + .card-outer .card { + height: 100%; + border-radius: 3px; } + .card-outer > a { + display: block; + color: inherit; + height: 100%; + outline: none; + position: absolute; } + .card-outer > a.active .card, .card-outer > a:focus .card { + box-shadow: inset 0 0 0 1px rgba(0, 0, 0, 0.1), 0 0 0 5px rgba(0, 0, 0, 0.1); + transition: box-shadow 150ms; } + .card-outer:hover, .card-outer:focus, .card-outer.active { + outline: none; + box-shadow: inset 0 0 0 1px rgba(0, 0, 0, 0.1), 0 0 0 5px rgba(0, 0, 0, 0.1); + transition: box-shadow 150ms; } + .card-outer:hover .context-menu-button, .card-outer:focus .context-menu-button, .card-outer.active .context-menu-button { + transform: scale(1); + opacity: 1; 
} + .card-outer .card-preview-image { + position: relative; + background-size: cover; + background-position: center; + background-repeat: no-repeat; + height: 122px; + border-bottom-color: rgba(0, 0, 0, 0.1); + border-bottom-style: solid; + border-bottom-width: 1px; + border-radius: 3px 3px 0 0; } + .card-outer .card-details { + padding: 10px 16px 12px; } + .card-outer .card-text { + overflow: hidden; + max-height: 78px; } + .card-outer .card-text.full-height { + max-height: 200px; } + .card-outer .card-host-name { + color: #858585; + font-size: 10px; + padding-bottom: 4px; + text-transform: uppercase; } + .card-outer .card-title { + margin: 0 0 2px; + font-size: inherit; + word-wrap: break-word; } + .card-outer .card-description { + font-size: 12px; + margin: 0; + word-wrap: break-word; + overflow: hidden; + line-height: 18px; + max-height: 34px; } + .card-outer .card-context { + padding: 16px 16px 14px 14px; + position: absolute; + bottom: 0; + left: 0; + right: 0; + color: #A0A0A0; + font-size: 11px; + display: flex; + align-items: center; } + .card-outer .card-context-icon { + opacity: 0.5; + font-size: 13px; + margin-inline-end: 6px; + display: block; } + .card-outer .card-context-label { + flex-grow: 1; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; } diff --git a/browser/extensions/activity-stream/data/content/activity-stream.html b/browser/extensions/activity-stream/data/content/activity-stream.html index 69d110e37bd42..d5c15bfd7d81f 100644 --- a/browser/extensions/activity-stream/data/content/activity-stream.html +++ b/browser/extensions/activity-stream/data/content/activity-stream.html @@ -8,6 +8,10 @@
+
+
+
+
diff --git a/browser/extensions/activity-stream/data/content/assets/glyph-historyItem-16.svg b/browser/extensions/activity-stream/data/content/assets/glyph-historyItem-16.svg new file mode 100644 index 0000000000000..05822cec1d348 --- /dev/null +++ b/browser/extensions/activity-stream/data/content/assets/glyph-historyItem-16.svg @@ -0,0 +1,6 @@ + + + + diff --git a/browser/extensions/activity-stream/data/content/assets/glyph-info-option-12.svg b/browser/extensions/activity-stream/data/content/assets/glyph-info-option-12.svg new file mode 100644 index 0000000000000..b2eef1230cb54 --- /dev/null +++ b/browser/extensions/activity-stream/data/content/assets/glyph-info-option-12.svg @@ -0,0 +1 @@ + diff --git a/browser/extensions/activity-stream/data/content/assets/glyph-now-16.svg b/browser/extensions/activity-stream/data/content/assets/glyph-now-16.svg new file mode 100644 index 0000000000000..8f1375d2088b6 --- /dev/null +++ b/browser/extensions/activity-stream/data/content/assets/glyph-now-16.svg @@ -0,0 +1,6 @@ + + + + diff --git a/browser/extensions/activity-stream/data/content/assets/glyph-pocket-16.svg b/browser/extensions/activity-stream/data/content/assets/glyph-pocket-16.svg index 1311da8fb980d..aab23f82717f8 100644 --- a/browser/extensions/activity-stream/data/content/assets/glyph-pocket-16.svg +++ b/browser/extensions/activity-stream/data/content/assets/glyph-pocket-16.svg @@ -2,5 +2,5 @@ - License, v. 2.0. If a copy of the MPL was not distributed with this - file, You can obtain one at http://mozilla.org/MPL/2.0/. 
--> - - \ No newline at end of file + + diff --git a/browser/extensions/activity-stream/data/content/assets/glyph-trending-16.svg b/browser/extensions/activity-stream/data/content/assets/glyph-trending-16.svg new file mode 100644 index 0000000000000..c84c4f0617a7e --- /dev/null +++ b/browser/extensions/activity-stream/data/content/assets/glyph-trending-16.svg @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/browser/extensions/activity-stream/data/content/assets/topic-show-more-12.svg b/browser/extensions/activity-stream/data/content/assets/topic-show-more-12.svg new file mode 100644 index 0000000000000..7e7639f4e7e63 --- /dev/null +++ b/browser/extensions/activity-stream/data/content/assets/topic-show-more-12.svg @@ -0,0 +1,12 @@ + + + + Icon / > + Created with Sketch. + + + + + + + \ No newline at end of file diff --git a/browser/extensions/activity-stream/data/locales.json b/browser/extensions/activity-stream/data/locales.json index 2bf7e23cdda87..48bbce8f03da2 100644 --- a/browser/extensions/activity-stream/data/locales.json +++ b/browser/extensions/activity-stream/data/locales.json @@ -1025,6 +1025,7 @@ "header_stories": "Top Stories", "header_visit_again": "Visit Again", "header_bookmarks": "Recent Bookmarks", + "header_recommended_by": "Recommended by {provider}", "header_bookmarks_placeholder": "You don’t have any bookmarks yet.", "header_stories_from": "from", "type_label_visited": "Visited", @@ -1051,6 +1052,7 @@ "search_header": "{search_engine_name} Search", "search_web_placeholder": "Search the Web", "search_settings": "Change Search Settings", + "section_info_option": "Info", "welcome_title": "Welcome to new tab", "welcome_body": "Firefox will use this space to show your most relevant bookmarks, articles, videos, and pages you’ve recently visited, so you can get back to them easily.", "welcome_label": "Identifying your Highlights", @@ -1095,7 +1097,8 @@ "pocket_read_even_more": "View More Stories", "pocket_feedback_header": "The best of the web, curated by 
over 25 million people.", "pocket_feedback_body": "Pocket, a part of the Mozilla family, will help connect you to high-quality content that you may not have found otherwise.", - "pocket_send_feedback": "Send Feedback" + "pocket_send_feedback": "Send Feedback", + "empty_state_topstories": "You’ve caught up. Check back later for more top stories from Pocket. Can’t wait? Select a popular topic to find more great stories from around the web." }, "en-ZA": {}, "eo": { diff --git a/browser/extensions/activity-stream/lib/ActivityStream.jsm b/browser/extensions/activity-stream/lib/ActivityStream.jsm index 316e622afb9e8..ba81e17f29a99 100644 --- a/browser/extensions/activity-stream/lib/ActivityStream.jsm +++ b/browser/extensions/activity-stream/lib/ActivityStream.jsm @@ -14,11 +14,34 @@ const {NewTabInit} = Cu.import("resource://activity-stream/lib/NewTabInit.jsm", const {PlacesFeed} = Cu.import("resource://activity-stream/lib/PlacesFeed.jsm", {}); const {PrefsFeed} = Cu.import("resource://activity-stream/lib/PrefsFeed.jsm", {}); const {Store} = Cu.import("resource://activity-stream/lib/Store.jsm", {}); +const {SnippetsFeed} = Cu.import("resource://activity-stream/lib/SnippetsFeed.jsm", {}); +const {SystemTickFeed} = Cu.import("resource://activity-stream/lib/SystemTickFeed.jsm", {}); const {TelemetryFeed} = Cu.import("resource://activity-stream/lib/TelemetryFeed.jsm", {}); const {TopSitesFeed} = Cu.import("resource://activity-stream/lib/TopSitesFeed.jsm", {}); +const {TopStoriesFeed} = Cu.import("resource://activity-stream/lib/TopStoriesFeed.jsm", {}); const REASON_ADDON_UNINSTALL = 6; +// Sections, keyed by section id +const SECTIONS = new Map([ + ["topstories", { + feed: TopStoriesFeed, + prefTitle: "Fetches content recommendations from a configurable content provider", + showByDefault: false + }] +]); + +const SECTION_FEEDS_CONFIG = Array.from(SECTIONS.entries()).map(entry => { + const id = entry[0]; + const {feed: Feed, prefTitle, showByDefault: value} = entry[1]; + 
return { + name: `section.${id}`, + factory: () => new Feed(), + title: prefTitle || `${id} section feed`, + value + }; +}); + const PREFS_CONFIG = new Map([ ["default.sites", { title: "Comma-separated list of default top sites to fill in behind visited sites", @@ -45,11 +68,24 @@ const PREFS_CONFIG = new Map([ ["telemetry.ping.endpoint", { title: "Telemetry server endpoint", value: "https://tiles.services.mozilla.com/v4/links/activity-stream" + }], + ["feeds.section.topstories.options", { + title: "Configuration options for top stories feed", + value: `{ + "stories_endpoint": "https://getpocket.com/v3/firefox/global-recs?consumer_key=$apiKey", + "topics_endpoint": "https://getpocket.com/v3/firefox/trending-topics?consumer_key=$apiKey", + "read_more_endpoint": "https://getpocket.com/explore/trending?src=ff_new_tab", + "learn_more_endpoint": "https://getpocket.com/firefox_learnmore?src=ff_newtab", + "survey_link": "https://www.surveymonkey.com/r/newtabffx", + "api_key_pref": "extensions.pocket.oAuthConsumerKey", + "provider_name": "Pocket", + "provider_icon": "pocket" + }` }] ]); const FEEDS_CONFIG = new Map(); -for (const {name, factory, title, value} of [ +for (const {name, factory, title, value} of SECTION_FEEDS_CONFIG.concat([ { name: "localization", factory: () => new LocalizationFeed(), @@ -74,6 +110,18 @@ for (const {name, factory, title, value} of [ title: "Preferences", value: true }, + { + name: "snippets", + factory: () => new SnippetsFeed(), + title: "Gets snippets data", + value: false + }, + { + name: "systemtick", + factory: () => new SystemTickFeed(), + title: "Produces system tick events to periodically check for data expiry", + value: true + }, { name: "telemetry", factory: () => new TelemetryFeed(), @@ -86,7 +134,7 @@ for (const {name, factory, title, value} of [ title: "Queries places and gets metadata for Top Sites section", value: true } -]) { +])) { const pref = `feeds.${name}`; FEEDS_CONFIG.set(pref, factory); PREFS_CONFIG.set(pref, {title, 
value}); @@ -135,4 +183,4 @@ this.ActivityStream = class ActivityStream { }; this.PREFS_CONFIG = PREFS_CONFIG; -this.EXPORTED_SYMBOLS = ["ActivityStream"]; +this.EXPORTED_SYMBOLS = ["ActivityStream", "SECTIONS"]; diff --git a/browser/extensions/activity-stream/lib/PlacesFeed.jsm b/browser/extensions/activity-stream/lib/PlacesFeed.jsm index c413fddcc4235..3de9568b476c5 100644 --- a/browser/extensions/activity-stream/lib/PlacesFeed.jsm +++ b/browser/extensions/activity-stream/lib/PlacesFeed.jsm @@ -13,6 +13,8 @@ XPCOMUtils.defineLazyModuleGetter(this, "NewTabUtils", "resource://gre/modules/NewTabUtils.jsm"); XPCOMUtils.defineLazyModuleGetter(this, "PlacesUtils", "resource://gre/modules/PlacesUtils.jsm"); +XPCOMUtils.defineLazyModuleGetter(this, "Pocket", + "chrome://pocket/content/Pocket.jsm"); const LINK_BLOCKED_EVENT = "newtab-linkBlocked"; @@ -205,6 +207,9 @@ class PlacesFeed { case at.DELETE_HISTORY_URL: NewTabUtils.activityStreamLinks.deleteHistoryEntry(action.data); break; + case at.SAVE_TO_POCKET: + Pocket.savePage(action._target.browser, action.data.site.url, action.data.site.title); + break; } } } diff --git a/browser/extensions/activity-stream/lib/SnippetsFeed.jsm b/browser/extensions/activity-stream/lib/SnippetsFeed.jsm new file mode 100644 index 0000000000000..0a2ea1b78c03d --- /dev/null +++ b/browser/extensions/activity-stream/lib/SnippetsFeed.jsm @@ -0,0 +1,58 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +"use strict"; + +const {utils: Cu} = Components; + +Cu.import("resource://gre/modules/Services.jsm"); +Cu.import("resource://gre/modules/Console.jsm"); +const {actionTypes: at, actionCreators: ac} = Cu.import("resource://activity-stream/common/Actions.jsm", {}); + +// Url to fetch snippets, in the urlFormatter service format. 
+const SNIPPETS_URL_PREF = "browser.aboutHomeSnippets.updateUrl"; + +// Should be bumped up if the snippets content format changes. +const STARTPAGE_VERSION = 4; + +this.SnippetsFeed = class SnippetsFeed { + constructor() { + this._onUrlChange = this._onUrlChange.bind(this); + } + get snippetsURL() { + const updateURL = Services + .prefs.getStringPref(SNIPPETS_URL_PREF) + .replace("%STARTPAGE_VERSION%", STARTPAGE_VERSION); + return Services.urlFormatter.formatURL(updateURL); + } + init() { + const data = { + snippetsURL: this.snippetsURL, + version: STARTPAGE_VERSION + }; + this.store.dispatch(ac.BroadcastToContent({type: at.SNIPPETS_DATA, data})); + Services.prefs.addObserver(SNIPPETS_URL_PREF, this._onUrlChange); + } + uninit() { + this.store.dispatch({type: at.SNIPPETS_RESET}); + Services.prefs.removeObserver(SNIPPETS_URL_PREF, this._onUrlChange); + } + _onUrlChange() { + this.store.dispatch(ac.BroadcastToContent({ + type: at.SNIPPETS_DATA, + data: {snippetsURL: this.snippetsURL} + })); + } + onAction(action) { + switch (action.type) { + case at.INIT: + this.init(); + break; + case at.FEED_INIT: + if (action.data === "feeds.snippets") { this.init(); } + break; + } + } +}; + +this.EXPORTED_SYMBOLS = ["SnippetsFeed"]; diff --git a/browser/extensions/activity-stream/lib/Store.jsm b/browser/extensions/activity-stream/lib/Store.jsm index 621bd30d47ebd..2f18db4075ed1 100644 --- a/browser/extensions/activity-stream/lib/Store.jsm +++ b/browser/extensions/activity-stream/lib/Store.jsm @@ -9,6 +9,7 @@ const {ActivityStreamMessageChannel} = Cu.import("resource://activity-stream/lib const {Prefs} = Cu.import("resource://activity-stream/lib/ActivityStreamPrefs.jsm", {}); const {reducers} = Cu.import("resource://activity-stream/common/Reducers.jsm", {}); const {redux} = Cu.import("resource://activity-stream/vendor/Redux.jsm", {}); +const {actionTypes: at} = Cu.import("resource://activity-stream/common/Actions.jsm", {}); /** * Store - This has a similar structure to a redux 
store, but includes some extra @@ -91,6 +92,7 @@ this.Store = class Store { if (this._feedFactories.has(name)) { if (value) { this.initFeed(name); + this.dispatch({type: at.FEED_INIT, data: name}); } else { this.uninitFeed(name); } diff --git a/browser/extensions/activity-stream/lib/SystemTickFeed.jsm b/browser/extensions/activity-stream/lib/SystemTickFeed.jsm new file mode 100644 index 0000000000000..21742eda39a96 --- /dev/null +++ b/browser/extensions/activity-stream/lib/SystemTickFeed.jsm @@ -0,0 +1,35 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +"use strict"; + +const {utils: Cu} = Components; +Cu.import("resource://gre/modules/XPCOMUtils.jsm"); + +const {actionTypes: at} = Cu.import("resource://activity-stream/common/Actions.jsm", {}); + +XPCOMUtils.defineLazyModuleGetter(this, "setInterval", "resource://gre/modules/Timer.jsm"); +XPCOMUtils.defineLazyModuleGetter(this, "clearInterval", "resource://gre/modules/Timer.jsm"); + +// Frequency at which SYSTEM_TICK events are fired +const SYSTEM_TICK_INTERVAL = 5 * 60 * 1000; + +this.SystemTickFeed = class SystemTickFeed { + init() { + this.intervalId = setInterval(() => this.store.dispatch({type: at.SYSTEM_TICK}), SYSTEM_TICK_INTERVAL); + } + + onAction(action) { + switch (action.type) { + case at.INIT: + this.init(); + break; + case at.UNINIT: + clearInterval(this.intervalId); + break; + } + } +}; + +this.SYSTEM_TICK_INTERVAL = SYSTEM_TICK_INTERVAL; +this.EXPORTED_SYMBOLS = ["SystemTickFeed", "SYSTEM_TICK_INTERVAL"]; diff --git a/browser/extensions/activity-stream/lib/TopStoriesFeed.jsm b/browser/extensions/activity-stream/lib/TopStoriesFeed.jsm new file mode 100644 index 0000000000000..57952968cc8a2 --- /dev/null +++ b/browser/extensions/activity-stream/lib/TopStoriesFeed.jsm @@ -0,0 +1,187 @@ +/* This Source Code Form is subject to the terms of the 
Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +"use strict"; + +const {utils: Cu} = Components; +Cu.import("resource://gre/modules/XPCOMUtils.jsm"); +Cu.import("resource://gre/modules/Services.jsm"); +Cu.import("resource://gre/modules/NewTabUtils.jsm"); +Cu.importGlobalProperties(["fetch"]); + +const {actionCreators: ac, actionTypes: at} = Cu.import("resource://activity-stream/common/Actions.jsm", {}); +const {Prefs} = Cu.import("resource://activity-stream/lib/ActivityStreamPrefs.jsm", {}); + +const STORIES_UPDATE_TIME = 30 * 60 * 1000; // 30 minutes +const TOPICS_UPDATE_TIME = 3 * 60 * 60 * 1000; // 3 hours +const SECTION_ID = "TopStories"; + +this.TopStoriesFeed = class TopStoriesFeed { + constructor() { + this.storiesLastUpdated = 0; + this.topicsLastUpdated = 0; + } + + init() { + try { + const prefs = new Prefs(); + const options = JSON.parse(prefs.get("feeds.section.topstories.options")); + const apiKey = this._getApiKeyFromPref(options.api_key_pref); + this.stories_endpoint = this._produceUrlWithApiKey(options.stories_endpoint, apiKey); + this.topics_endpoint = this._produceUrlWithApiKey(options.topics_endpoint, apiKey); + this.read_more_endpoint = options.read_more_endpoint; + + // TODO https://github.com/mozilla/activity-stream/issues/2902 + const sectionOptions = { + id: SECTION_ID, + icon: options.provider_icon, + title: {id: "header_recommended_by", values: {provider: options.provider_name}}, + rows: [], + maxCards: 3, + contextMenuOptions: ["SaveToPocket", "Separator", "CheckBookmark", "Separator", "OpenInNewWindow", "OpenInPrivateWindow", "Separator", "BlockUrl"], + infoOption: { + header: {id: "pocket_feedback_header"}, + body: {id: "pocket_feedback_body"}, + link: { + href: options.survey_link, + id: "pocket_send_feedback" + } + }, + emptyState: { + message: {id: "empty_state_topstories"}, + icon: "check" + } + }; + 
this.store.dispatch(ac.BroadcastToContent({type: at.SECTION_REGISTER, data: sectionOptions})); + + this.fetchStories(); + this.fetchTopics(); + } catch (e) { + Cu.reportError(`Problem initializing top stories feed: ${e.message}`); + } + } + + uninit() { + this.store.dispatch(ac.BroadcastToContent({type: at.SECTION_DEREGISTER, data: SECTION_ID})); + } + + async fetchStories() { + if (this.stories_endpoint) { + const stories = await fetch(this.stories_endpoint) + .then(response => { + if (response.ok) { + return response.text(); + } + throw new Error(`Stories endpoint returned unexpected status: ${response.status}`); + }) + .then(body => { + let items = JSON.parse(body).list; + items = items + .filter(s => !NewTabUtils.blockedLinks.isBlocked(s.dedupe_url)) + .map(s => ({ + "guid": s.id, + "type": "trending", + "title": s.title, + "description": s.excerpt, + "image": this._normalizeUrl(s.image_src), + "url": s.dedupe_url, + "lastVisitDate": s.published_timestamp + })); + return items; + }) + .catch(error => Cu.reportError(`Failed to fetch content: ${error.message}`)); + + if (stories) { + this.dispatchUpdateEvent(this.storiesLastUpdated, + {"type": at.SECTION_ROWS_UPDATE, "data": {"id": SECTION_ID, "rows": stories}}); + this.storiesLastUpdated = Date.now(); + } + } + } + + async fetchTopics() { + if (this.topics_endpoint) { + const topics = await fetch(this.topics_endpoint) + .then(response => { + if (response.ok) { + return response.text(); + } + throw new Error(`Topics endpoint returned unexpected status: ${response.status}`); + }) + .then(body => JSON.parse(body).topics) + .catch(error => Cu.reportError(`Failed to fetch topics: ${error.message}`)); + + if (topics) { + this.dispatchUpdateEvent(this.topicsLastUpdated, + {"type": at.SECTION_ROWS_UPDATE, "data": {"id": SECTION_ID, "topics": topics, "read_more_endpoint": this.read_more_endpoint}}); + this.topicsLastUpdated = Date.now(); + } + } + } + + dispatchUpdateEvent(lastUpdated, evt) { + if (lastUpdated === 0) { + 
this.store.dispatch(ac.BroadcastToContent(evt)); + } else { + this.store.dispatch(evt); + } + } + + _getApiKeyFromPref(apiKeyPref) { + if (!apiKeyPref) { + return apiKeyPref; + } + + return new Prefs().get(apiKeyPref) || Services.prefs.getCharPref(apiKeyPref); + } + + _produceUrlWithApiKey(url, apiKey) { + if (!url) { + return url; + } + + if (url.includes("$apiKey") && !apiKey) { + throw new Error(`An API key was specified but none configured: ${url}`); + } + + return url.replace("$apiKey", apiKey); + } + + // Need to remove parenthesis from image URLs as React will otherwise + // fail to render them properly as part of the card template. + _normalizeUrl(url) { + if (url) { + return url.replace(/\(/g, "%28").replace(/\)/g, "%29"); + } + return url; + } + + onAction(action) { + switch (action.type) { + case at.INIT: + this.init(); + break; + case at.SYSTEM_TICK: + if (Date.now() - this.storiesLastUpdated >= STORIES_UPDATE_TIME) { + this.fetchStories(); + } + if (Date.now() - this.topicsLastUpdated >= TOPICS_UPDATE_TIME) { + this.fetchTopics(); + } + break; + case at.UNINIT: + this.uninit(); + break; + case at.FEED_INIT: + if (action.data === "feeds.section.topstories") { + this.init(); + } + break; + } + } +}; + +this.STORIES_UPDATE_TIME = STORIES_UPDATE_TIME; +this.TOPICS_UPDATE_TIME = TOPICS_UPDATE_TIME; +this.SECTION_ID = SECTION_ID; +this.EXPORTED_SYMBOLS = ["TopStoriesFeed", "STORIES_UPDATE_TIME", "TOPICS_UPDATE_TIME", "SECTION_ID"]; diff --git a/browser/extensions/activity-stream/test/functional/mochitest/browser.ini b/browser/extensions/activity-stream/test/functional/mochitest/browser.ini index baf0effcd20e3..7803f967e13f3 100644 --- a/browser/extensions/activity-stream/test/functional/mochitest/browser.ini +++ b/browser/extensions/activity-stream/test/functional/mochitest/browser.ini @@ -1,5 +1,4 @@ [DEFAULT] -skip-if=!nightly_build support-files = blue_page.html diff --git a/browser/extensions/activity-stream/test/mozinfo.json 
b/browser/extensions/activity-stream/test/mozinfo.json deleted file mode 100644 index 3e0fd8e1ebd12..0000000000000 --- a/browser/extensions/activity-stream/test/mozinfo.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "activity_stream": true -} diff --git a/browser/extensions/activity-stream/test/unit/common/Reducers.test.js b/browser/extensions/activity-stream/test/unit/common/Reducers.test.js index 45685929002bf..60d796bf71995 100644 --- a/browser/extensions/activity-stream/test/unit/common/Reducers.test.js +++ b/browser/extensions/activity-stream/test/unit/common/Reducers.test.js @@ -1,5 +1,6 @@ const {reducers, INITIAL_STATE, insertPinned} = require("common/Reducers.jsm"); -const {TopSites, App, Prefs, Dialog} = reducers; +const {TopSites, App, Snippets, Prefs, Dialog, Sections} = reducers; + const {actionTypes: at} = require("common/Actions.jsm"); describe("Reducers", () => { @@ -77,6 +78,10 @@ describe("Reducers", () => { // old row is unchanged assert.equal(nextState.rows[0], oldState.rows[0]); }); + it("should not update state for empty action.data on PLACES_BOOKMARK_ADDED", () => { + const nextState = TopSites(undefined, {type: at.PLACES_BOOKMARK_ADDED}); + assert.equal(nextState, INITIAL_STATE.TopSites); + }); it("should remove a bookmark on PLACES_BOOKMARK_REMOVED", () => { const oldState = { rows: [{url: "foo.com"}, { @@ -98,6 +103,10 @@ describe("Reducers", () => { // old row is unchanged assert.deepEqual(nextState.rows[0], oldState.rows[0]); }); + it("should not update state for empty action.data on PLACES_BOOKMARK_REMOVED", () => { + const nextState = TopSites(undefined, {type: at.PLACES_BOOKMARK_REMOVED}); + assert.equal(nextState, INITIAL_STATE.TopSites); + }); it("should remove a link on PLACES_LINK_BLOCKED and PLACES_LINK_DELETED", () => { const events = [at.PLACES_LINK_BLOCKED, at.PLACES_LINK_DELETED]; events.forEach(event => { @@ -179,6 +188,70 @@ describe("Reducers", () => { assert.deepEqual(INITIAL_STATE.Dialog, nextState); }); }); + 
describe("Sections", () => { + let oldState; + + beforeEach(() => { + oldState = new Array(5).fill(null).map((v, i) => ({ + id: `foo_bar_${i}`, + title: `Foo Bar ${i}`, + initialized: false, + rows: [{url: "www.foo.bar"}, {url: "www.other.url"}] + })); + }); + + it("should return INITIAL_STATE by default", () => { + assert.equal(INITIAL_STATE.Sections, Sections(undefined, {type: "non_existent"})); + }); + it("should remove the correct section on SECTION_DEREGISTER", () => { + const newState = Sections(oldState, {type: at.SECTION_DEREGISTER, data: "foo_bar_2"}); + assert.lengthOf(newState, 4); + const expectedNewState = oldState.splice(2, 1) && oldState; + assert.deepEqual(newState, expectedNewState); + }); + it("should add a section on SECTION_REGISTER if it doesn't already exist", () => { + const action = {type: at.SECTION_REGISTER, data: {id: "foo_bar_5", title: "Foo Bar 5"}}; + const newState = Sections(oldState, action); + assert.lengthOf(newState, 6); + const insertedSection = newState.find(section => section.id === "foo_bar_5"); + assert.propertyVal(insertedSection, "title", action.data.title); + }); + it("should set newSection.rows === [] if no rows are provided on SECTION_REGISTER", () => { + const action = {type: at.SECTION_REGISTER, data: {id: "foo_bar_5", title: "Foo Bar 5"}}; + const newState = Sections(oldState, action); + const insertedSection = newState.find(section => section.id === "foo_bar_5"); + assert.deepEqual(insertedSection.rows, []); + }); + it("should update a section on SECTION_REGISTER if it already exists", () => { + const NEW_TITLE = "New Title"; + const action = {type: at.SECTION_REGISTER, data: {id: "foo_bar_2", title: NEW_TITLE}}; + const newState = Sections(oldState, action); + assert.lengthOf(newState, 5); + const updatedSection = newState.find(section => section.id === "foo_bar_2"); + assert.ok(updatedSection && updatedSection.title === NEW_TITLE); + }); + it("should have no effect on SECTION_ROWS_UPDATE if the id doesn't exist", 
() => { + const action = {type: at.SECTION_ROWS_UPDATE, data: {id: "fake_id", data: "fake_data"}}; + const newState = Sections(oldState, action); + assert.deepEqual(oldState, newState); + }); + it("should update the section rows with the correct data on SECTION_ROWS_UPDATE", () => { + const FAKE_DATA = ["some", "fake", "data"]; + const action = {type: at.SECTION_ROWS_UPDATE, data: {id: "foo_bar_2", rows: FAKE_DATA}}; + const newState = Sections(oldState, action); + const updatedSection = newState.find(section => section.id === "foo_bar_2"); + assert.equal(updatedSection.rows, FAKE_DATA); + }); + it("should remove blocked and deleted urls from all rows in all sections", () => { + const blockAction = {type: at.PLACES_LINK_BLOCKED, data: {url: "www.foo.bar"}}; + const deleteAction = {type: at.PLACES_LINK_DELETED, data: {url: "www.foo.bar"}}; + const newBlockState = Sections(oldState, blockAction); + const newDeleteState = Sections(oldState, deleteAction); + newBlockState.concat(newDeleteState).forEach(section => { + assert.deepEqual(section.rows, [{url: "www.other.url"}]); + }); + }); + }); describe("#insertPinned", () => { let links; @@ -244,4 +317,23 @@ describe("Reducers", () => { assert.equal(links.length, result.length); }); }); + describe("Snippets", () => { + it("should return INITIAL_STATE by default", () => { + assert.equal(Snippets(undefined, {type: "some_action"}), INITIAL_STATE.Snippets); + }); + it("should set initialized to true on a SNIPPETS_DATA action", () => { + const state = Snippets(undefined, {type: at.SNIPPETS_DATA, data: {}}); + assert.isTrue(state.initialized); + }); + it("should set the snippet data on a SNIPPETS_DATA action", () => { + const data = {snippetsURL: "foo.com", version: 4}; + const state = Snippets(undefined, {type: at.SNIPPETS_DATA, data}); + assert.propertyVal(state, "snippetsURL", data.snippetsURL); + assert.propertyVal(state, "version", data.version); + }); + it("should reset to the initial state on a SNIPPETS_RESET action", 
() => { + const state = Snippets({initalized: true, foo: "bar"}, {type: at.SNIPPETS_RESET}); + assert.equal(state, INITIAL_STATE.Snippets); + }); + }); }); diff --git a/browser/extensions/activity-stream/test/unit/lib/ActivityStream.test.js b/browser/extensions/activity-stream/test/unit/lib/ActivityStream.test.js index e2573bf342449..84f320807ccf6 100644 --- a/browser/extensions/activity-stream/test/unit/lib/ActivityStream.test.js +++ b/browser/extensions/activity-stream/test/unit/lib/ActivityStream.test.js @@ -6,17 +6,21 @@ describe("ActivityStream", () => { let sandbox; let as; let ActivityStream; + let SECTIONS; function Fake() {} beforeEach(() => { sandbox = sinon.sandbox.create(); - ({ActivityStream} = injector({ + ({ActivityStream, SECTIONS} = injector({ "lib/LocalizationFeed.jsm": {LocalizationFeed: Fake}, "lib/NewTabInit.jsm": {NewTabInit: Fake}, "lib/PlacesFeed.jsm": {PlacesFeed: Fake}, "lib/TelemetryFeed.jsm": {TelemetryFeed: Fake}, "lib/TopSitesFeed.jsm": {TopSitesFeed: Fake}, - "lib/PrefsFeed.jsm": {PrefsFeed: Fake} + "lib/PrefsFeed.jsm": {PrefsFeed: Fake}, + "lib/SnippetsFeed.jsm": {SnippetsFeed: Fake}, + "lib/TopStoriesFeed.jsm": {TopStoriesFeed: Fake}, + "lib/SystemTickFeed.jsm": {SystemTickFeed: Fake} })); as = new ActivityStream(); sandbox.stub(as.store, "init"); @@ -106,5 +110,21 @@ describe("ActivityStream", () => { const feed = as.feeds.get("feeds.prefs")(); assert.instanceOf(feed, Fake); }); + it("should create a section feed for each section in SECTIONS", () => { + // If new sections are added, their feeds will have to be added to the + // list of injected feeds above for this test to pass + SECTIONS.forEach((value, key) => { + const feed = as.feeds.get(`feeds.section.${key}`)(); + assert.instanceOf(feed, Fake); + }); + }); + it("should create a Snippets feed", () => { + const feed = as.feeds.get("feeds.snippets")(); + assert.instanceOf(feed, Fake); + }); + it("should create a SystemTick feed", () => { + const feed = 
as.feeds.get("feeds.systemtick")(); + assert.instanceOf(feed, Fake); + }); }); }); diff --git a/browser/extensions/activity-stream/test/unit/lib/PlacesFeed.test.js b/browser/extensions/activity-stream/test/unit/lib/PlacesFeed.test.js index 4a3cc6f65315f..56bdcdc5d5d8f 100644 --- a/browser/extensions/activity-stream/test/unit/lib/PlacesFeed.test.js +++ b/browser/extensions/activity-stream/test/unit/lib/PlacesFeed.test.js @@ -28,6 +28,7 @@ describe("PlacesFeed", () => { history: {addObserver: sandbox.spy(), removeObserver: sandbox.spy()}, bookmarks: {TYPE_BOOKMARK, addObserver: sandbox.spy(), removeObserver: sandbox.spy()} }); + globals.set("Pocket", {savePage: sandbox.spy()}); global.Components.classes["@mozilla.org/browser/nav-history-service;1"] = { getService() { return global.PlacesUtils.history; @@ -98,6 +99,10 @@ describe("PlacesFeed", () => { feed.onAction({type: at.DELETE_HISTORY_URL, data: "guava.com"}); assert.calledWith(global.NewTabUtils.activityStreamLinks.deleteHistoryEntry, "guava.com"); }); + it("should save to Pocket on SAVE_TO_POCKET", () => { + feed.onAction({type: at.SAVE_TO_POCKET, data: {site: {url: "raspberry.com", title: "raspberry"}}, _target: {browser: {}}}); + assert.calledWith(global.Pocket.savePage, {}, "raspberry.com", "raspberry"); + }); }); describe("#observe", () => { diff --git a/browser/extensions/activity-stream/test/unit/lib/SnippetsFeed.test.js b/browser/extensions/activity-stream/test/unit/lib/SnippetsFeed.test.js new file mode 100644 index 0000000000000..401c0e1381715 --- /dev/null +++ b/browser/extensions/activity-stream/test/unit/lib/SnippetsFeed.test.js @@ -0,0 +1,60 @@ +const {SnippetsFeed} = require("lib/SnippetsFeed.jsm"); +const {actionTypes: at, actionCreators: ac} = require("common/Actions.jsm"); + +describe("SnippetsFeed", () => { + let sandbox; + beforeEach(() => { + sandbox = sinon.sandbox.create(); + }); + afterEach(() => { + sandbox.restore(); + }); + it("should dispatch the right version and snippetsURL on 
INIT", () => { + const url = "foo.com/%STARTPAGE_VERSION%"; + sandbox.stub(global.Services.prefs, "getStringPref").returns(url); + const feed = new SnippetsFeed(); + feed.store = {dispatch: sandbox.stub()}; + + feed.onAction({type: at.INIT}); + + assert.calledWith(feed.store.dispatch, ac.BroadcastToContent({ + type: at.SNIPPETS_DATA, + data: { + snippetsURL: "foo.com/4", + version: 4 + } + })); + }); + it("should call .init when a FEED_INIT happens for feeds.snippets", () => { + const feed = new SnippetsFeed(); + sandbox.stub(feed, "init"); + feed.store = {dispatch: sandbox.stub()}; + + feed.onAction({type: at.FEED_INIT, data: "feeds.snippets"}); + + assert.calledOnce(feed.init); + }); + it("should dispatch a SNIPPETS_RESET on uninit", () => { + const feed = new SnippetsFeed(); + feed.store = {dispatch: sandbox.stub()}; + + feed.uninit(); + + assert.calledWith(feed.store.dispatch, {type: at.SNIPPETS_RESET}); + }); + describe("_onUrlChange", () => { + it("should dispatch a new snippetsURL", () => { + const url = "boo.com/%STARTPAGE_VERSION%"; + sandbox.stub(global.Services.prefs, "getStringPref").returns(url); + const feed = new SnippetsFeed(); + feed.store = {dispatch: sandbox.stub()}; + + feed._onUrlChange(); + + assert.calledWith(feed.store.dispatch, ac.BroadcastToContent({ + type: at.SNIPPETS_DATA, + data: {snippetsURL: "boo.com/4"} + })); + }); + }); +}); diff --git a/browser/extensions/activity-stream/test/unit/lib/SystemTickFeed.test.js b/browser/extensions/activity-stream/test/unit/lib/SystemTickFeed.test.js new file mode 100644 index 0000000000000..13c52dcceded2 --- /dev/null +++ b/browser/extensions/activity-stream/test/unit/lib/SystemTickFeed.test.js @@ -0,0 +1,41 @@ +"use strict"; +const injector = require("inject!lib/SystemTickFeed.jsm"); +const {actionTypes: at} = require("common/Actions.jsm"); + +describe("System Tick Feed", () => { + let SystemTickFeed; + let SYSTEM_TICK_INTERVAL; + let instance; + let clock; + + beforeEach(() => { + clock = 
sinon.useFakeTimers(); + + ({SystemTickFeed, SYSTEM_TICK_INTERVAL} = injector({})); + instance = new SystemTickFeed(); + instance.store = {getState() { return {}; }, dispatch() {}}; + }); + afterEach(() => { + clock.restore(); + }); + it("should create a SystemTickFeed", () => { + assert.instanceOf(instance, SystemTickFeed); + }); + it("should fire SYSTEM_TICK events at configured interval", () => { + let expectation = sinon.mock(instance.store).expects("dispatch") + .twice() + .withExactArgs({type: at.SYSTEM_TICK}); + + instance.onAction({type: at.INIT}); + clock.tick(SYSTEM_TICK_INTERVAL * 2); + expectation.verify(); + }); + it("should not fire SYSTEM_TICK events after UNINIT", () => { + let expectation = sinon.mock(instance.store).expects("dispatch") + .never(); + + instance.onAction({type: at.UNINIT}); + clock.tick(SYSTEM_TICK_INTERVAL * 2); + expectation.verify(); + }); +}); diff --git a/browser/extensions/activity-stream/test/unit/lib/TopStoriesFeed.test.js b/browser/extensions/activity-stream/test/unit/lib/TopStoriesFeed.test.js new file mode 100644 index 0000000000000..a1a0421146d93 --- /dev/null +++ b/browser/extensions/activity-stream/test/unit/lib/TopStoriesFeed.test.js @@ -0,0 +1,257 @@ +"use strict"; +const injector = require("inject!lib/TopStoriesFeed.jsm"); +const {FakePrefs} = require("test/unit/utils"); +const {actionCreators: ac, actionTypes: at} = require("common/Actions.jsm"); +const {GlobalOverrider} = require("test/unit/utils"); + +describe("Top Stories Feed", () => { + let TopStoriesFeed; + let STORIES_UPDATE_TIME; + let TOPICS_UPDATE_TIME; + let SECTION_ID; + let instance; + let clock; + let globals; + + beforeEach(() => { + FakePrefs.prototype.prefs["feeds.section.topstories.options"] = `{ + "stories_endpoint": "https://somedomain.org/stories?key=$apiKey", + "topics_endpoint": "https://somedomain.org/topics?key=$apiKey", + "survey_link": "https://www.surveymonkey.com/r/newtabffx", + "api_key_pref": "apiKeyPref", + "provider_name": 
"test-provider", + "provider_icon": "provider-icon" + }`; + FakePrefs.prototype.prefs.apiKeyPref = "test-api-key"; + + globals = new GlobalOverrider(); + clock = sinon.useFakeTimers(); + + ({TopStoriesFeed, STORIES_UPDATE_TIME, TOPICS_UPDATE_TIME, SECTION_ID} = injector({"lib/ActivityStreamPrefs.jsm": {Prefs: FakePrefs}})); + instance = new TopStoriesFeed(); + instance.store = {getState() { return {}; }, dispatch: sinon.spy()}; + }); + afterEach(() => { + globals.restore(); + clock.restore(); + }); + describe("#init", () => { + it("should create a TopStoriesFeed", () => { + assert.instanceOf(instance, TopStoriesFeed); + }); + it("should initialize endpoints based on prefs", () => { + instance.onAction({type: at.INIT}); + assert.equal("https://somedomain.org/stories?key=test-api-key", instance.stories_endpoint); + assert.equal("https://somedomain.org/topics?key=test-api-key", instance.topics_endpoint); + }); + it("should register section", () => { + const expectedSectionOptions = { + id: SECTION_ID, + icon: "provider-icon", + title: {id: "header_recommended_by", values: {provider: "test-provider"}}, + rows: [], + maxCards: 3, + contextMenuOptions: ["SaveToPocket", "Separator", "CheckBookmark", "Separator", "OpenInNewWindow", "OpenInPrivateWindow", "Separator", "BlockUrl"], + infoOption: { + header: {id: "pocket_feedback_header"}, + body: {id: "pocket_feedback_body"}, + link: { + href: "https://www.surveymonkey.com/r/newtabffx", + id: "pocket_send_feedback" + } + }, + emptyState: { + message: {id: "empty_state_topstories"}, + icon: "check" + } + }; + + instance.onAction({type: at.INIT}); + assert.calledOnce(instance.store.dispatch); + assert.propertyVal(instance.store.dispatch.firstCall.args[0], "type", at.SECTION_REGISTER); + assert.calledWith(instance.store.dispatch, ac.BroadcastToContent({ + type: at.SECTION_REGISTER, + data: expectedSectionOptions + })); + }); + it("should fetch stories on init", () => { + instance.fetchStories = sinon.spy(); + 
instance.fetchTopics = sinon.spy(); + instance.onAction({type: at.INIT}); + assert.calledOnce(instance.fetchStories); + }); + it("should fetch topics on init", () => { + instance.fetchStories = sinon.spy(); + instance.fetchTopics = sinon.spy(); + instance.onAction({type: at.INIT}); + assert.calledOnce(instance.fetchTopics); + }); + it("should not fetch if endpoint not configured", () => { + let fetchStub = globals.sandbox.stub(); + globals.set("fetch", fetchStub); + FakePrefs.prototype.prefs["feeds.section.topstories.options"] = "{}"; + instance.init(); + assert.notCalled(fetchStub); + }); + it("should report error for invalid configuration", () => { + globals.sandbox.spy(global.Components.utils, "reportError"); + FakePrefs.prototype.prefs["feeds.section.topstories.options"] = "invalid"; + instance.init(); + + assert.called(Components.utils.reportError); + }); + it("should report error for missing api key", () => { + let fakeServices = {prefs: {getCharPref: sinon.spy()}}; + globals.set("Services", fakeServices); + globals.sandbox.spy(global.Components.utils, "reportError"); + FakePrefs.prototype.prefs["feeds.section.topstories.options"] = `{ + "stories_endpoint": "https://somedomain.org/stories?key=$apiKey", + "topics_endpoint": "https://somedomain.org/topics?key=$apiKey" + }`; + instance.init(); + + assert.called(Components.utils.reportError); + }); + it("should deregister section", () => { + instance.onAction({type: at.UNINIT}); + assert.calledOnce(instance.store.dispatch); + assert.calledWith(instance.store.dispatch, ac.BroadcastToContent({ + type: at.SECTION_DEREGISTER, + data: SECTION_ID + })); + }); + it("should initialize on FEED_INIT", () => { + instance.init = sinon.spy(); + instance.onAction({type: at.FEED_INIT, data: "feeds.section.topstories"}); + assert.calledOnce(instance.init); + }); + }); + describe("#fetch", () => { + it("should fetch stories and send event", async () => { + let fetchStub = globals.sandbox.stub(); + globals.set("fetch", fetchStub); 
+ globals.set("NewTabUtils", {blockedLinks: {isBlocked: globals.sandbox.spy()}}); + + const response = `{"list": [{"id" : "1", + "title": "title", + "excerpt": "description", + "image_src": "image-url", + "dedupe_url": "rec-url", + "published_timestamp" : "123" + }]}`; + const stories = [{ + "guid": "1", + "type": "trending", + "title": "title", + "description": "description", + "image": "image-url", + "url": "rec-url", + "lastVisitDate": "123" + }]; + + instance.stories_endpoint = "stories-endpoint"; + fetchStub.resolves({ok: true, status: 200, text: () => response}); + await instance.fetchStories(); + + assert.calledOnce(fetchStub); + assert.calledWithExactly(fetchStub, instance.stories_endpoint); + assert.calledOnce(instance.store.dispatch); + assert.propertyVal(instance.store.dispatch.firstCall.args[0], "type", at.SECTION_ROWS_UPDATE); + assert.deepEqual(instance.store.dispatch.firstCall.args[0].data.id, SECTION_ID); + assert.deepEqual(instance.store.dispatch.firstCall.args[0].data.rows, stories); + }); + it("should dispatch events", () => { + instance.dispatchUpdateEvent(123, {}); + assert.calledOnce(instance.store.dispatch); + }); + it("should report error for unexpected stories response", async () => { + let fetchStub = globals.sandbox.stub(); + globals.set("fetch", fetchStub); + globals.sandbox.spy(global.Components.utils, "reportError"); + + instance.stories_endpoint = "stories-endpoint"; + fetchStub.resolves({ok: false, status: 400}); + await instance.fetchStories(); + + assert.calledOnce(fetchStub); + assert.calledWithExactly(fetchStub, instance.stories_endpoint); + assert.notCalled(instance.store.dispatch); + assert.called(Components.utils.reportError); + }); + it("should exclude blocked (dismissed) URLs", async () => { + let fetchStub = globals.sandbox.stub(); + globals.set("fetch", fetchStub); + globals.set("NewTabUtils", {blockedLinks: {isBlocked: url => url === "blocked"}}); + + const response = `{"list": [{"dedupe_url" : "blocked"}, {"dedupe_url" : 
"not_blocked"}]}`; + instance.stories_endpoint = "stories-endpoint"; + fetchStub.resolves({ok: true, status: 200, text: () => response}); + await instance.fetchStories(); + + assert.calledOnce(instance.store.dispatch); + assert.propertyVal(instance.store.dispatch.firstCall.args[0], "type", at.SECTION_ROWS_UPDATE); + assert.equal(instance.store.dispatch.firstCall.args[0].data.rows.length, 1); + assert.equal(instance.store.dispatch.firstCall.args[0].data.rows[0].url, "not_blocked"); + }); + it("should fetch topics and send event", async () => { + let fetchStub = globals.sandbox.stub(); + globals.set("fetch", fetchStub); + + const response = `{"topics": [{"name" : "topic1", "url" : "url-topic1"}, {"name" : "topic2", "url" : "url-topic2"}]}`; + const topics = [{ + "name": "topic1", + "url": "url-topic1" + }, { + "name": "topic2", + "url": "url-topic2" + }]; + + instance.topics_endpoint = "topics-endpoint"; + fetchStub.resolves({ok: true, status: 200, text: () => response}); + await instance.fetchTopics(); + + assert.calledOnce(fetchStub); + assert.calledWithExactly(fetchStub, instance.topics_endpoint); + assert.calledOnce(instance.store.dispatch); + assert.propertyVal(instance.store.dispatch.firstCall.args[0], "type", at.SECTION_ROWS_UPDATE); + assert.deepEqual(instance.store.dispatch.firstCall.args[0].data.id, SECTION_ID); + assert.deepEqual(instance.store.dispatch.firstCall.args[0].data.topics, topics); + }); + it("should report error for unexpected topics response", async () => { + let fetchStub = globals.sandbox.stub(); + globals.set("fetch", fetchStub); + globals.sandbox.spy(global.Components.utils, "reportError"); + + instance.topics_endpoint = "topics-endpoint"; + fetchStub.resolves({ok: false, status: 400}); + await instance.fetchTopics(); + + assert.calledOnce(fetchStub); + assert.calledWithExactly(fetchStub, instance.topics_endpoint); + assert.notCalled(instance.store.dispatch); + assert.called(Components.utils.reportError); + }); + }); + describe("#update", 
() => { + it("should fetch stories after update interval", () => { + instance.fetchStories = sinon.spy(); + instance.fetchTopics = sinon.spy(); + instance.onAction({type: at.SYSTEM_TICK}); + assert.notCalled(instance.fetchStories); + + clock.tick(STORIES_UPDATE_TIME); + instance.onAction({type: at.SYSTEM_TICK}); + assert.calledOnce(instance.fetchStories); + }); + it("should fetch topics after update interval", () => { + instance.fetchStories = sinon.spy(); + instance.fetchTopics = sinon.spy(); + instance.onAction({type: at.SYSTEM_TICK}); + assert.notCalled(instance.fetchTopics); + + clock.tick(TOPICS_UPDATE_TIME); + instance.onAction({type: at.SYSTEM_TICK}); + assert.calledOnce(instance.fetchTopics); + }); + }); +}); diff --git a/browser/extensions/activity-stream/test/unit/lib/init-store.test.js b/browser/extensions/activity-stream/test/unit/lib/init-store.test.js index 3152e51ece728..4f01e6718e228 100644 --- a/browser/extensions/activity-stream/test/unit/lib/init-store.test.js +++ b/browser/extensions/activity-stream/test/unit/lib/init-store.test.js @@ -24,6 +24,16 @@ describe("initStore", () => { callback(message); assert.calledWith(store.dispatch, message.data); }); + it("should log errors from failed messages", () => { + const callback = global.addMessageListener.firstCall.args[1]; + globals.sandbox.stub(global.console, "error"); + globals.sandbox.stub(store, "dispatch").throws(Error("failed")); + + const message = {name: initStore.INCOMING_MESSAGE_NAME, data: {type: "FOO"}}; + callback(message); + + assert.calledOnce(global.console.error); + }); it("should replace the state if a MERGE_STORE_ACTION is dispatched", () => { store.dispatch({type: initStore.MERGE_STORE_ACTION, data: {number: 42}}); assert.deepEqual(store.getState(), {number: 42}); diff --git a/browser/extensions/activity-stream/test/unit/unit-entry.js b/browser/extensions/activity-stream/test/unit/unit-entry.js index 867b16349aef9..fd6eed631b285 100644 --- 
a/browser/extensions/activity-stream/test/unit/unit-entry.js +++ b/browser/extensions/activity-stream/test/unit/unit-entry.js @@ -29,6 +29,7 @@ overrider.set({ Preferences: FakePrefs, Services: { locale: {getRequestedLocale() {}}, + urlFormatter: {formatURL: str => str}, mm: { addMessageListener: (msg, cb) => cb(), removeMessageListener() {} @@ -39,6 +40,8 @@ overrider.set({ removeObserver() {} }, prefs: { + addObserver() {}, + removeObserver() {}, getStringPref() {}, getDefaultBranch() { return { From ffb778fbadae5df587b9d9459c5175185797d7f4 Mon Sep 17 00:00:00 2001 From: Doug Thayer Date: Thu, 20 Jul 2017 16:09:36 -0700 Subject: [PATCH 054/152] Bug 1382706 - Listen for pending-elevate in Update-Listener r=rstrong We were missing a possible status in the list for the downloaded topic. This should be all that we need to get the notification to show up. MozReview-Commit-ID: EQSccrdv30Y --HG-- extra : rebase_source : cc620f08549fb099a82fd1fb4b4aee1c6a124e2f --- toolkit/mozapps/update/UpdateListener.jsm | 1 + 1 file changed, 1 insertion(+) diff --git a/toolkit/mozapps/update/UpdateListener.jsm b/toolkit/mozapps/update/UpdateListener.jsm index 12f440acf3e1d..de73f196e58f1 100644 --- a/toolkit/mozapps/update/UpdateListener.jsm +++ b/toolkit/mozapps/update/UpdateListener.jsm @@ -163,6 +163,7 @@ var UpdateListener = { case "pending": case "applied-service": case "pending-service": + case "pending-elevate": case "success": this.clearCallbacks(); From 554036bb606710a9fa1f8f706f8466f014f8bb0b Mon Sep 17 00:00:00 2001 From: Ricky Chien Date: Thu, 20 Jul 2017 14:29:29 +0800 Subject: [PATCH 055/152] Bug 1381356 - The onboarding notification button should not wrap r=mossop MozReview-Commit-ID: L7mMLopZqdl --HG-- extra : rebase_source : 232d05713ff957e950aedecb7ba0ae9dcc81156e --- browser/extensions/onboarding/content/onboarding.css | 1 + 1 file changed, 1 insertion(+) diff --git a/browser/extensions/onboarding/content/onboarding.css 
b/browser/extensions/onboarding/content/onboarding.css index 89770cd67d3fc..e063130402087 100644 --- a/browser/extensions/onboarding/content/onboarding.css +++ b/browser/extensions/onboarding/content/onboarding.css @@ -536,6 +536,7 @@ padding: 10px 20px; font-size: 14px; color: #fff; + min-width: 130px; } @media all and (max-width: 960px) { From 53d1741327fb4f98b969eb832d8035392b7c5a1a Mon Sep 17 00:00:00 2001 From: Gijs Kruitbosch Date: Mon, 17 Jul 2017 12:31:35 +0100 Subject: [PATCH 056/152] Bug 1366026 - add a screenshots item to the library, r=jaws MozReview-Commit-ID: 7DpVOBgf3rw --HG-- extra : rebase_source : 59b8922d10476bbdcf2cbe882b89efa1854cd717 --- browser/themes/shared/customizableui/panelUI.inc.css | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/browser/themes/shared/customizableui/panelUI.inc.css b/browser/themes/shared/customizableui/panelUI.inc.css index ef50015b97a6d..a2ac3f9481836 100644 --- a/browser/themes/shared/customizableui/panelUI.inc.css +++ b/browser/themes/shared/customizableui/panelUI.inc.css @@ -1298,6 +1298,10 @@ photonpanelmultiview .panel-banner-item > .toolbarbutton-multiline-text { padding-inline-start: 8px; /* See '.subviewbutton-iconic > .toolbarbutton-text' rule above. */ } +photonpanelmultiview .subviewbutton-iconic > .toolbarbutton-icon { + width: 16px; +} + photonpanelmultiview .subviewbutton { -moz-context-properties: fill; fill: currentColor; From 90bb73c6720b9f423d1bd2e4e0aa572b57b38c41 Mon Sep 17 00:00:00 2001 From: Hiroyuki Ikezoe Date: Thu, 20 Jul 2017 12:53:11 +0900 Subject: [PATCH 057/152] Bug 1367278 - Call may_have_animations() for parent element in the case where the target is pseudo element. r=birtles In case of pseudo elements ElementHasAnimations is set on the parent element. updating-animation-on-pseudo-element.html fails without this patch, succeeds with this patch. 
MozReview-Commit-ID: HJaX7m8nV96 --HG-- extra : rebase_source : 15466f065d852ebc5fefd5d305639ba366a221f6 --- layout/reftests/css-animations/reftest.list | 2 + ...ating-animation-on-pseudo-element-ref.html | 11 ++++++ .../updating-animation-on-pseudo-element.html | 38 +++++++++++++++++++ 3 files changed, 51 insertions(+) create mode 100644 layout/reftests/css-animations/updating-animation-on-pseudo-element-ref.html create mode 100644 layout/reftests/css-animations/updating-animation-on-pseudo-element.html diff --git a/layout/reftests/css-animations/reftest.list b/layout/reftests/css-animations/reftest.list index 0392c15f8e320..27dd4eed256fc 100644 --- a/layout/reftests/css-animations/reftest.list +++ b/layout/reftests/css-animations/reftest.list @@ -50,3 +50,5 @@ fails == background-position-important.html background-position-ref.html # This == mask-size-after-finish-1b.html mask-anim-ref.html == mask-size-in-delay-1a.html mask-anim-ref.html == mask-size-in-delay-1b.html mask-anim-ref.html + +== updating-animation-on-pseudo-element.html updating-animation-on-pseudo-element-ref.html diff --git a/layout/reftests/css-animations/updating-animation-on-pseudo-element-ref.html b/layout/reftests/css-animations/updating-animation-on-pseudo-element-ref.html new file mode 100644 index 0000000000000..7196f11503c52 --- /dev/null +++ b/layout/reftests/css-animations/updating-animation-on-pseudo-element-ref.html @@ -0,0 +1,11 @@ + + + +
+ diff --git a/layout/reftests/css-animations/updating-animation-on-pseudo-element.html b/layout/reftests/css-animations/updating-animation-on-pseudo-element.html new file mode 100644 index 0000000000000..cae7d2723c290 --- /dev/null +++ b/layout/reftests/css-animations/updating-animation-on-pseudo-element.html @@ -0,0 +1,38 @@ + + + +
+ + From 733ab5250f887ed1935b82bb5952168d890cac25 Mon Sep 17 00:00:00 2001 From: Jared Wein Date: Thu, 20 Jul 2017 09:35:50 -0400 Subject: [PATCH 058/152] Bug 1382667 - Implement an onBeforeCommand function for CustomizableUI that will run before the command is executed. r=Gijs MozReview-Commit-ID: DGR3mo01vXP --HG-- extra : rebase_source : dbf5d63cbb888ffb4441485309ca854f34d0a613 --- .../customizableui/CustomizableUI.jsm | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/browser/components/customizableui/CustomizableUI.jsm b/browser/components/customizableui/CustomizableUI.jsm index 2f432b2cb25f4..6e1fc91f856e4 100644 --- a/browser/components/customizableui/CustomizableUI.jsm +++ b/browser/components/customizableui/CustomizableUI.jsm @@ -1521,6 +1521,14 @@ var CustomizableUIInternal = { handleWidgetCommand(aWidget, aNode, aEvent) { log.debug("handleWidgetCommand"); + if (aWidget.onBeforeCommand) { + try { + aWidget.onBeforeCommand.call(null, aEvent); + } catch (e) { + log.error(e); + } + } + if (aWidget.type == "button") { if (aWidget.onCommand) { try { @@ -2414,6 +2422,10 @@ var CustomizableUIInternal = { this.wrapWidgetEventHandler("onCreated", widget); this.wrapWidgetEventHandler("onDestroyed", widget); + if (typeof aData.onBeforeCommand == "function") { + widget.onBeforeCommand = aData.onBeforeCommand; + } + if (widget.type == "button") { widget.onCommand = typeof aData.onCommand == "function" ? aData.onCommand : @@ -3291,6 +3303,12 @@ this.CustomizableUI = { * passing the document from which it was removed. This is * useful especially for 'view' type widgets that need to * cleanup after views that were constructed on the fly. + * - onBeforeCommand(aEvt): A function that will be invoked when the user + * activates the button but before the command + * is evaluated. Useful if code needs to run to + * change the button's icon in preparation to the + * pending command action. Called for both type=button + * and type=view. 
* - onCommand(aEvt): Only useful for button widgets; a function that will be * invoked when the user activates the button. * - onClick(aEvt): Attached to all widgets; a function that will be invoked From 3d8a18b1a2f34826434fb8a17e6e36cc34582733 Mon Sep 17 00:00:00 2001 From: Jared Wein Date: Thu, 20 Jul 2017 09:59:22 -0400 Subject: [PATCH 059/152] Bug 1355922 - Save to Pocket should have an associated animation. r=Gijs The animations on the pocket-button and library-button are disabled if they are not in the nav-bar due to issues with getting overflow:hidden and position:absolute to work the same way outside of #nav-bar. This is on file as bug 1382894. MozReview-Commit-ID: Lq20GuocMkg --HG-- extra : rebase_source : 216557e4bbb265c222b104f2314ac7a0e00fe459 --- browser/base/content/browser.js | 14 +- browser/base/content/browser.xul | 6 +- browser/extensions/pocket/bootstrap.js | 23 +- browser/extensions/pocket/content/Pocket.jsm | 9 + browser/extensions/pocket/content/main.js | 30 + .../skin/shared/library-pocket-animation.svg | 986 ++++++++++++++++++ .../pocket/skin/shared/pocket-animation.svg | 157 +++ .../extensions/pocket/skin/shared/pocket.css | 164 +++ .../themes/shared/toolbarbutton-icons.inc.css | 13 +- toolkit/modules/BrowserUtils.jsm | 29 + 10 files changed, 1411 insertions(+), 20 deletions(-) create mode 100644 browser/extensions/pocket/skin/shared/library-pocket-animation.svg create mode 100644 browser/extensions/pocket/skin/shared/pocket-animation.svg diff --git a/browser/base/content/browser.js b/browser/base/content/browser.js index 96d4bb51939bb..1e24a68d02d25 100755 --- a/browser/base/content/browser.js +++ b/browser/base/content/browser.js @@ -4988,16 +4988,6 @@ var CombinedStopReload = { }); }, - /* This function is necessary to correctly vertically center the animation - within the toolbar, which uses -moz-pack-align:stretch; and thus a height - which is dependant on the font-size. 
*/ - setAnimationImageHeightRelativeToToolbarButtonHeight() { - let dwu = window.getInterface(Ci.nsIDOMWindowUtils); - let toolbarItem = this.stopReloadContainer.closest(".customization-target > toolbaritem"); - let bounds = dwu.getBoundsWithoutFlushing(toolbarItem); - toolbarItem.style.setProperty("--toolbarbutton-height", bounds.height + "px"); - }, - switchToStop(aRequest, aWebProgress) { if (!this._initialized) return; @@ -5010,7 +5000,7 @@ var CombinedStopReload = { this._cancelTransition(); if (shouldAnimate) { - this.setAnimationImageHeightRelativeToToolbarButtonHeight(); + BrowserUtils.setToolbarButtonHeightProperty(this.stopReloadContainer); this.stopReloadContainer.setAttribute("animate", "true"); } else { this.stopReloadContainer.removeAttribute("animate"); @@ -5029,7 +5019,7 @@ var CombinedStopReload = { this.animate; if (shouldAnimate) { - this.setAnimationImageHeightRelativeToToolbarButtonHeight(); + BrowserUtils.setToolbarButtonHeightProperty(this.stopReloadContainer); this.stopReloadContainer.setAttribute("animate", "true"); } else { this.stopReloadContainer.removeAttribute("animate"); diff --git a/browser/base/content/browser.xul b/browser/base/content/browser.xul index f5bf89a1daca2..ada2da0ff141c 100644 --- a/browser/base/content/browser.xul +++ b/browser/base/content/browser.xul @@ -1220,7 +1220,11 @@ + label="&places.library.title;"> + + + + #endif diff --git a/browser/extensions/pocket/bootstrap.js b/browser/extensions/pocket/bootstrap.js index 98252ac4a4f9d..c39f4787d01a1 100644 --- a/browser/extensions/pocket/bootstrap.js +++ b/browser/extensions/pocket/bootstrap.js @@ -9,6 +9,7 @@ const {classes: Cc, interfaces: Ci, utils: Cu, manager: Cm} = Components; Cu.import("resource://gre/modules/XPCOMUtils.jsm"); Cu.import("resource://services-common/utils.js"); +Cu.import("resource://gre/modules/AppConstants.jsm"); XPCOMUtils.defineLazyModuleGetter(this, "Services", "resource://gre/modules/Services.jsm"); XPCOMUtils.defineLazyModuleGetter(this, 
"RecentWindow", @@ -92,6 +93,13 @@ function CreatePocketWidget(reason) { label: gPocketBundle.GetStringFromName("pocket-button.label"), tooltiptext: gPocketBundle.GetStringFromName("pocket-button.tooltiptext"), // Use forwarding functions here to avoid loading Pocket.jsm on startup: + onBeforeCommand() { + // We need to use onBeforeCommand to calculate the height + // of the pocket-button before it is opened since we need + // the height of the button to perform the animation that is + // triggered off of [open="true"]. + return Pocket.onBeforeCommand.apply(this, arguments); + }, onViewShowing() { return Pocket.onPanelViewShowing.apply(this, arguments); }, @@ -108,7 +116,20 @@ function CreatePocketWidget(reason) { panel.setAttribute("class", "panel-subview-body"); view.appendChild(panel); doc.getElementById("PanelUI-multiView").appendChild(view); - } + }, + onCreated(node) { + if (Services.prefs.getBoolPref("toolkit.cosmeticAnimations.enabled") && + AppConstants.MOZ_PHOTON_ANIMATIONS) { + let doc = node.ownerDocument; + let box = doc.createElement("box"); + box.classList.add("toolbarbutton-animatable-box"); + let image = doc.createElement("image"); + image.classList.add("toolbarbutton-animatable-image"); + box.appendChild(image); + node.appendChild(box); + node.setAttribute("animationsenabled", "true"); + } + }, }; CustomizableUI.createWidget(pocketButton); diff --git a/browser/extensions/pocket/content/Pocket.jsm b/browser/extensions/pocket/content/Pocket.jsm index 54f9cdf11b4ba..84797c135bc0c 100644 --- a/browser/extensions/pocket/content/Pocket.jsm +++ b/browser/extensions/pocket/content/Pocket.jsm @@ -10,6 +10,8 @@ this.EXPORTED_SYMBOLS = ["Pocket"]; Cu.import("resource://gre/modules/XPCOMUtils.jsm"); Cu.import("resource://gre/modules/Services.jsm"); +XPCOMUtils.defineLazyModuleGetter(this, "BrowserUtils", + "resource://gre/modules/BrowserUtils.jsm"); XPCOMUtils.defineLazyModuleGetter(this, "CustomizableUI", "resource:///modules/CustomizableUI.jsm"); 
XPCOMUtils.defineLazyModuleGetter(this, "ReaderMode", @@ -22,11 +24,18 @@ var Pocket = { /** * Functions related to the Pocket panel UI. */ + onBeforeCommand(event) { + BrowserUtils.setToolbarButtonHeightProperty(event.target); + }, + onPanelViewShowing(event) { let document = event.target.ownerDocument; let window = document.defaultView; let iframe = window.pktUI.getPanelFrame(); + let libraryButton = document.getElementById("library-button"); + BrowserUtils.setToolbarButtonHeightProperty(libraryButton); + let urlToSave = Pocket._urlToSave; let titleToSave = Pocket._titleToSave; Pocket._urlToSave = null; diff --git a/browser/extensions/pocket/content/main.js b/browser/extensions/pocket/content/main.js index 84acc10a728f0..cc2bf0e426e9e 100644 --- a/browser/extensions/pocket/content/main.js +++ b/browser/extensions/pocket/content/main.js @@ -68,6 +68,8 @@ var pktUI = (function() { var savePanelWidth = 350; var savePanelHeights = {collapsed: 153, expanded: 272}; + var _lastAddSucceeded = false; + // -- Event Handling -- // /** @@ -88,8 +90,34 @@ var pktUI = (function() { // clear the panel getPanelFrame().setAttribute("src", "about:blank"); + + if (_lastAddSucceeded) { + var libraryButton = document.getElementById("library-button"); + if (!Services.prefs.getBoolPref("toolkit.cosmeticAnimations.enabled") || + !libraryButton || + libraryButton.getAttribute("cui-areatype") == "menu-panel" || + libraryButton.getAttribute("overflowedItem") == "true" || + !libraryButton.closest("toolbar") || + libraryButton.closest("toolbar").id != "nav-bar") { + return; + } + libraryButton.removeAttribute("fade"); + libraryButton.setAttribute("animate", "pocket"); + libraryButton.addEventListener("animationend", onLibraryButtonAnimationEnd); + } } + function onLibraryButtonAnimationEnd(event) { + let doc = event.target.ownerDocument; + let libraryButton = doc.getElementById("library-button"); + if (event.animationName.startsWith("library-pocket-animation")) { + 
libraryButton.setAttribute("fade", "true"); + } else if (event.animationName == "library-pocket-fade") { + libraryButton.removeEventListener("animationend", onLibraryButtonAnimationEnd); + libraryButton.removeAttribute("animate"); + libraryButton.removeAttribute("fade"); + } + } // -- Communication to API -- // @@ -197,6 +225,7 @@ var pktUI = (function() { var panelId = showPanel("about:pocket-saved?pockethost=" + Services.prefs.getCharPref("extensions.pocket.site") + "&premiumStatus=" + (pktApi.isPremiumUser() ? "1" : "0") + "&inoverflowmenu=" + inOverflowMenu + "&locale=" + getUILocale(), { onShow() { var saveLinkMessageId = "saveLink"; + _lastAddSucceeded = false; // Send error message for invalid url if (!isValidURL) { @@ -228,6 +257,7 @@ var pktUI = (function() { item }; pktUIMessaging.sendMessageToPanel(panelId, saveLinkMessageId, successResponse); + _lastAddSucceeded = true; }, error(error, request) { // If user is not authorized show singup page diff --git a/browser/extensions/pocket/skin/shared/library-pocket-animation.svg b/browser/extensions/pocket/skin/shared/library-pocket-animation.svg new file mode 100644 index 0000000000000..32d499748cdfc --- /dev/null +++ b/browser/extensions/pocket/skin/shared/library-pocket-animation.svg @@ -0,0 +1,986 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/browser/extensions/pocket/skin/shared/pocket-animation.svg b/browser/extensions/pocket/skin/shared/pocket-animation.svg new file mode 100644 index 0000000000000..0987c07beab94 --- /dev/null +++ b/browser/extensions/pocket/skin/shared/pocket-animation.svg @@ -0,0 +1,157 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/browser/extensions/pocket/skin/shared/pocket.css b/browser/extensions/pocket/skin/shared/pocket.css index caeed25583a1b..7146af674030f 100644 --- 
a/browser/extensions/pocket/skin/shared/pocket.css +++ b/browser/extensions/pocket/skin/shared/pocket.css @@ -23,6 +23,170 @@ toolbar[brighttext] #pocket-button { fill: var(--toolbarbutton-icon-fill-inverted); } +#pocket-button[open="true"][animationsenabled] > .toolbarbutton-icon { + fill: transparent; +} + +@keyframes pocket-animation { + from { + transform: translateX(0); + } + to { + transform: translateX(-220px); + } +} + +@keyframes pocket-animation-rtl { + from { + transform: scaleX(-1) translateX(0); + } + to { + transform: scaleX(-1) translateX(-220px); + } +} + +#pocket-button > .toolbarbutton-animatable-box { + position: absolute; + overflow: hidden; + top: calc(50% - 9px); /* 9px is half the height of the sprite */ + /* Since .toolbarbutton-icon uses a different width than the animatable box, + we need to set a padding relative to the difference in widths. */ + margin-inline-start: calc((16px + 2 * var(--toolbarbutton-inner-padding) - 20px) / 2); + width: 20px; /* Width of each frame within the SVG sprite */ + height: 18px; /* Height of each frame within the SVG sprite */ +} + +#pocket-button > .toolbarbutton-animatable-box > .toolbarbutton-animatable-image { + height: var(--toolbarbutton-height); /* Height must be equal to height of toolbarbutton padding-box */ +} + +#pocket-button[open="true"][animationsenabled][cui-areatype="toolbar"]:not([overflowedItem="true"]) { + position: relative; +} + +/* Preload pocket-animation.svg and library-pocket-animation.svg to prevent + a flicker at the start of either animation. The preloading of the library + animation is triggered off of hovering the pocket button since the pocket + button always animates before the library button. 
*/ +#pocket-button[animationsenabled][cui-areatype="toolbar"]:not([overflowedItem="true"]):not([open="true"]):hover > .toolbarbutton-animatable-box > .toolbarbutton-animatable-image { + background-image: url("chrome://pocket-shared/skin/pocket-animation.svg"), + url("chrome://pocket-shared/skin/library-pocket-animation.svg"); + background-size: 0, 0; +} + +#pocket-button[open="true"][animationsenabled][cui-areatype="toolbar"]:not([overflowedItem="true"]) > .toolbarbutton-animatable-box > .toolbarbutton-animatable-image { + animation-name: pocket-animation; + animation-timing-function: steps(11); + animation-duration: 184ms; + background-image: url("chrome://pocket-shared/skin/pocket-animation.svg"); + width: 240px; +} + +#pocket-button[open="true"][animationsenabled][cui-areatype="toolbar"]:not([overflowedItem="true"]):-moz-locale-dir(rtl) > .toolbarbutton-animatable-box > .toolbarbutton-animatable-image { + animation-name: pocket-animation-rtl; +} + +#library-button[animate="pocket"] > .toolbarbutton-icon { + fill: transparent; +} + +@keyframes library-pocket-animation { + from { + transform: translateX(0); + fill: inherit; + } + 25% { + fill: inherit; + } + 50% { + fill: rgb(213,32,20); + } + to { + transform: translateX(-1056px); + fill: rgb(213,32,20); + } +} + +@keyframes library-pocket-animation-rtl { + from { + transform: scaleX(-1) translateX(0); + fill: inherit; + } + 25% { + fill: inherit; + } + 50% { + fill: rgb(213,32,20); + } + to { + transform: scaleX(-1) translateX(-1056px); + fill: rgb(213,32,20); + } +} + +/* The animations for the pocket-button and library-button are disabled + outside of the nav-bar due to bug 1382894. */ +:-moz-any(#pocket-button, #library-button) > .toolbarbutton-animatable-box { + display: none; +} +#nav-bar :-moz-any(#pocket-button, #library-button) > .toolbarbutton-animatable-box { + display: -moz-box; +} + +/* We need to use an animation here instead of a transition + to guarantee that the animation succeeds. 
With transitions + if the starting value is already equal to the end value + then no transition will occur and thus no transitionend event. */ +@keyframes library-pocket-fade { + from { + fill: rgb(213,32,20); + } + to { + fill: inherit; + } +} + +#library-button[animate="pocket"] { + position: relative; +} + +#library-button[animate="pocket"] > .toolbarbutton-animatable-box { + position: absolute; + overflow: hidden; + top: calc(50% - 27px); /* 27px is half the height of the sprite */ + /* Since .toolbarbutton-icon uses a different width than the animatable box, + we need to set a padding relative to the difference in widths. */ + margin-inline-start: calc((16px + 2 * var(--toolbarbutton-inner-padding) - 22px) / 2); + width: 22px; /* Width of each frame within the SVG sprite */ + /* Height of each frame within the SVG sprite. The sprite must have equal amount + of space above and below the icon to allow it to vertically center with the + sprite's icon on top of the toolbar icon when using position:absolute;. 
*/ + height: 54px; +} + +#library-button[animate="pocket"] > .toolbarbutton-animatable-box > .toolbarbutton-animatable-image { + height: var(--toolbarbutton-height); /* Height must be equal to height of toolbarbutton padding-box */ +} + +#library-button[animate="pocket"] > .toolbarbutton-animatable-box > .toolbarbutton-animatable-image { + background-image: url("chrome://pocket-shared/skin/library-pocket-animation.svg"); + width: 1078px; + animation-name: library-pocket-animation; + animation-duration: 768ms; + animation-timing-function: steps(48); +} + +#library-button[animate="pocket"]:-moz-locale-dir(rtl) > .toolbarbutton-animatable-box > .toolbarbutton-animatable-image { + animation-name: library-pocket-animation-rtl; + transform: scaleX(-1); +} + +#library-button[animate="pocket"][fade] > .toolbarbutton-animatable-box > .toolbarbutton-animatable-image { + animation-name: library-pocket-fade; + animation-duration: 2s; + animation-timing-function: ease-out; +} + #pocket-button[cui-areatype="toolbar"][open] { fill: rgb(213,32,20); } diff --git a/browser/themes/shared/toolbarbutton-icons.inc.css b/browser/themes/shared/toolbarbutton-icons.inc.css index 66f8b9aa35099..ab06850161818 100644 --- a/browser/themes/shared/toolbarbutton-icons.inc.css +++ b/browser/themes/shared/toolbarbutton-icons.inc.css @@ -17,6 +17,7 @@ toolbar[brighttext] .toolbarbutton-1 { %ifdef MOZ_PHOTON_THEME #forward-button:-moz-locale-dir(rtl) > .toolbarbutton-icon, #reload-button:-moz-locale-dir(rtl) > .toolbarbutton-icon, +#library-button:-moz-locale-dir(rtl) > .toolbarbutton-icon, %endif #nav-bar-overflow-button:-moz-locale-dir(rtl) > .toolbarbutton-icon, #panic-button:-moz-locale-dir(rtl) > .toolbarbutton-icon { @@ -37,6 +38,12 @@ toolbar[brighttext] .toolbarbutton-1 { %ifdef MOZ_PHOTON_THEME %ifdef MOZ_PHOTON_ANIMATIONS +.toolbarbutton-animatable-box > .toolbarbutton-animatable-image { + animation-fill-mode: forwards; + animation-iteration-count: 1; + list-style-image: none; +} + 
#stop-reload-button[animate] > #reload-button > .toolbarbutton-icon, #stop-reload-button[animate] > #reload-button[displaystop] + #stop-button > .toolbarbutton-icon { fill: transparent; @@ -104,9 +111,6 @@ toolbar[brighttext] .toolbarbutton-1 { #reload-button > .toolbarbutton-animatable-box > .toolbarbutton-animatable-image, #stop-button > .toolbarbutton-animatable-box > .toolbarbutton-animatable-image { height: var(--toolbarbutton-height); /* Height must be equal to height of toolbarbutton padding-box */ - animation-fill-mode: forwards; - animation-iteration-count: 1; - list-style-image: none; } #stop-reload-button[animate] > #reload-button > .toolbarbutton-animatable-box > .toolbarbutton-animatable-image { @@ -365,9 +369,6 @@ toolbar:not([brighttext]) #bookmarks-menu-button@attributeSelectorForToolbar@[st #nav-bar-overflow-button > .toolbarbutton-animatable-box > .toolbarbutton-animatable-image { height: 24px; /* Height of each frame within the SVG sprite */ - animation-fill-mode: forwards; - animation-iteration-count: 1; - list-style-image: none; } #nav-bar-overflow-button[animate] > .toolbarbutton-animatable-box > .toolbarbutton-animatable-image { diff --git a/toolkit/modules/BrowserUtils.jsm b/toolkit/modules/BrowserUtils.jsm index 53a7041a8229a..e97995c3b72b9 100644 --- a/toolkit/modules/BrowserUtils.jsm +++ b/toolkit/modules/BrowserUtils.jsm @@ -366,6 +366,35 @@ this.BrowserUtils = { return !!toolbars && toolbars.has(which); }, + /** + * Sets the --toolbarbutton-button-height CSS property on the closest + * toolbar to the provided element. Useful if you need to vertically + * center a position:absolute element within a toolbar that uses + * -moz-pack-align:stretch, and thus a height which is dependant on + * the font-size. + * + * @param element An element within the toolbar whose height is desired. 
+ * + */ + setToolbarButtonHeightProperty(element) { + let window = element.ownerGlobal; + let dwu = window.getInterface(Ci.nsIDOMWindowUtils); + let toolbarItem = element; + let urlBarContainer = element.closest("#urlbar-container"); + if (urlBarContainer) { + // The stop-reload-button, which is contained in #urlbar-container, + // needs to use #urlbar-container to calculate the bounds. + toolbarItem = urlBarContainer; + } + if (!toolbarItem) { + return; + } + let bounds = dwu.getBoundsWithoutFlushing(toolbarItem); + if (bounds.height) { + toolbarItem.style.setProperty("--toolbarbutton-height", bounds.height + "px"); + } + }, + /** * Track whether a toolbar is visible for a given a docShell. * From 916bad784eeef6a9aabc9fda22425a426f182a9b Mon Sep 17 00:00:00 2001 From: Wes Kocher Date: Thu, 20 Jul 2017 17:59:20 -0700 Subject: [PATCH 060/152] Backed out changeset 0ee08a9803fc (bug 1382849) for cctools bustage a=backout MozReview-Commit-ID: 61JTxrzOYOZ --- taskcluster/ci/toolchain/kind.yml | 1 - taskcluster/taskgraph/transforms/build_attrs.py | 6 +----- taskcluster/taskgraph/transforms/job/toolchain.py | 2 -- 3 files changed, 1 insertion(+), 8 deletions(-) diff --git a/taskcluster/ci/toolchain/kind.yml b/taskcluster/ci/toolchain/kind.yml index 143f22aec35b2..23f5290fb6fe0 100644 --- a/taskcluster/ci/toolchain/kind.yml +++ b/taskcluster/ci/toolchain/kind.yml @@ -6,7 +6,6 @@ loader: taskgraph.loader.transform:loader transforms: - taskgraph.transforms.try_job:transforms - - taskgraph.transforms.build_attrs:transforms - taskgraph.transforms.toolchain:transforms - taskgraph.transforms.job:transforms - taskgraph.transforms.task:transforms diff --git a/taskcluster/taskgraph/transforms/build_attrs.py b/taskcluster/taskgraph/transforms/build_attrs.py index 4781231f259f6..56c0076148b01 100644 --- a/taskcluster/taskgraph/transforms/build_attrs.py +++ b/taskcluster/taskgraph/transforms/build_attrs.py @@ -17,11 +17,7 @@ def set_build_attributes(config, jobs): appropriately for 
that purpose. """ for job in jobs: - if '/' in job['name']: - build_platform, build_type = job['name'].split('/') - else: - build_platform = job['name'] - build_type = 'opt' + build_platform, build_type = job['name'].split('/') # pgo builds are represented as a different platform, type opt if build_type == 'pgo': diff --git a/taskcluster/taskgraph/transforms/job/toolchain.py b/taskcluster/taskgraph/transforms/job/toolchain.py index 1bdfaff8d29d6..bc48c29ca2236 100644 --- a/taskcluster/taskgraph/transforms/job/toolchain.py +++ b/taskcluster/taskgraph/transforms/job/toolchain.py @@ -14,7 +14,6 @@ from taskgraph.transforms.job.common import ( docker_worker_add_tc_vcs_cache, docker_worker_add_gecko_vcs_env_vars, - docker_worker_add_workspace_cache, support_vcs_checkout, ) from taskgraph.util.hash import hash_paths @@ -87,7 +86,6 @@ def docker_worker_toolchain(config, job, taskdesc): 'type': 'directory', }) - docker_worker_add_workspace_cache(config, job, taskdesc) docker_worker_add_tc_vcs_cache(config, job, taskdesc) docker_worker_add_gecko_vcs_env_vars(config, job, taskdesc) support_vcs_checkout(config, job, taskdesc) From 60f0b17b9d8ff8d3dfa24ebf230a8622e4fb3607 Mon Sep 17 00:00:00 2001 From: Manish Goregaokar Date: Thu, 20 Jul 2017 17:22:48 -0700 Subject: [PATCH 061/152] Bug 1382672 - Add crashtest for discrete animation between system fonts on stylo; r=emilio MozReview-Commit-ID: 2nI3jgkZ0R7 --HG-- extra : rebase_source : 2c62ce6a3d7d19a96785803e255ffebfc9decfa7 --- layout/style/crashtests/1382672.html | 11 +++++++++++ layout/style/crashtests/crashtests.list | 1 + 2 files changed, 12 insertions(+) create mode 100644 layout/style/crashtests/1382672.html diff --git a/layout/style/crashtests/1382672.html b/layout/style/crashtests/1382672.html new file mode 100644 index 0000000000000..9e8eea5baec6e --- /dev/null +++ b/layout/style/crashtests/1382672.html @@ -0,0 +1,11 @@ + + + + + + + diff --git a/layout/style/crashtests/crashtests.list 
b/layout/style/crashtests/crashtests.list index ff16e2def04df..a7a43c5af53b7 100644 --- a/layout/style/crashtests/crashtests.list +++ b/layout/style/crashtests/crashtests.list @@ -183,3 +183,4 @@ load 1378814.html load 1380800.html load link-transition-before.html load 1381682.html +load 1382672.html From 05183c96565900a13d0df8dae8382e63fea4ae5a Mon Sep 17 00:00:00 2001 From: Alan Jeffrey Date: Thu, 20 Jul 2017 17:09:06 -0700 Subject: [PATCH 062/152] servo: Merge #17499 - Fixed scaling artefacts in paint worklets caused by zoom and hidpi (from asajeffrey:script-paint-worklets-zoom); r=glennw This PR renders paint worklet canvases at the device pixel resolution, rather than the CSS pixel resolution. It's a dependent PR, building on #17239, #17326 and #17364. --- - [X] `./mach build -d` does not report any errors - [X] `./mach test-tidy` does not report any errors - [X] These changes fix #17454 - [X] These changes do not require tests because we don't run reftests with zoom enabled Source-Repo: https://github.com/servo/servo Source-Revision: 9fcbeb3ca2ea0d11d4787c649e82918f7672694d --HG-- extra : subtree_source : https%3A//hg.mozilla.org/projects/converted-servo-linear extra : subtree_revision : 3ee9ba9b149697292aef4b26de022b90ab6f781e --- servo/Cargo.lock | 1 - servo/components/compositing/compositor.rs | 4 +- servo/components/compositing/touch.rs | 3 +- servo/components/compositing/windowing.rs | 3 +- servo/components/constellation/pipeline.rs | 3 +- .../components/layout/display_list_builder.rs | 13 +++-- servo/components/layout_thread/lib.rs | 8 ++- servo/components/script/dom/bindings/trace.rs | 9 ++- servo/components/script/dom/mediaquerylist.rs | 3 +- .../script/dom/paintrenderingcontext2d.rs | 32 ++++++++-- servo/components/script/dom/paintsize.rs | 12 ++-- .../script/dom/paintworkletglobalscope.rs | 58 +++++++++++-------- servo/components/script_traits/Cargo.toml | 1 - servo/components/script_traits/lib.rs | 12 +--- servo/components/style/context.rs | 8 +++ 
servo/components/style/gecko/media_queries.rs | 12 ++++ servo/components/style/servo/media_queries.rs | 15 ++++- servo/components/style_traits/lib.rs | 6 ++ servo/ports/cef/window.rs | 3 +- servo/ports/glutin/window.rs | 3 +- servo/tests/unit/style/media_queries.rs | 9 +-- servo/tests/unit/style/parsing/mod.rs | 3 +- servo/tests/unit/style/stylist.rs | 3 +- servo/tests/unit/style/viewport.rs | 13 +++-- 24 files changed, 160 insertions(+), 77 deletions(-) diff --git a/servo/Cargo.lock b/servo/Cargo.lock index b2a05f3be0fd4..10ad2c40ac66e 100644 --- a/servo/Cargo.lock +++ b/servo/Cargo.lock @@ -2558,7 +2558,6 @@ dependencies = [ name = "script_traits" version = "0.0.1" dependencies = [ - "app_units 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "bluetooth_traits 0.0.1", "canvas_traits 0.0.1", "cookie 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/servo/components/compositing/compositor.rs b/servo/components/compositing/compositor.rs index 93431755a4d8a..051454400ca96 100644 --- a/servo/components/compositing/compositor.rs +++ b/servo/components/compositing/compositor.rs @@ -16,7 +16,7 @@ use msg::constellation_msg::{PipelineId, PipelineIndex, PipelineNamespaceId, Tra use net_traits::image::base::{Image, PixelFormat}; use profile_traits::time::{self, ProfilerCategory, profile}; use script_traits::{AnimationState, AnimationTickType, ConstellationControlMsg}; -use script_traits::{ConstellationMsg, DevicePixel, LayoutControlMsg, LoadData, MouseButton}; +use script_traits::{ConstellationMsg, LayoutControlMsg, LoadData, MouseButton}; use script_traits::{MouseEventType, ScrollState}; use script_traits::{TouchpadPressurePhase, TouchEventType, TouchId, WindowSizeData, WindowSizeType}; use script_traits::CompositorEvent::{self, MouseMoveEvent, MouseButtonEvent, TouchEvent, TouchpadPressureEvent}; @@ -29,7 +29,7 @@ use std::fs::File; use std::rc::Rc; use std::sync::mpsc::Sender; use std::time::{Duration, Instant}; -use 
style_traits::{CSSPixel, PinchZoomFactor}; +use style_traits::{CSSPixel, DevicePixel, PinchZoomFactor}; use style_traits::viewport::ViewportConstraints; use time::{precise_time_ns, precise_time_s}; use touch::{TouchHandler, TouchAction}; diff --git a/servo/components/compositing/touch.rs b/servo/components/compositing/touch.rs index 1edafa017c83a..0d6a7561a2b1f 100644 --- a/servo/components/compositing/touch.rs +++ b/servo/components/compositing/touch.rs @@ -4,8 +4,9 @@ use euclid::{TypedPoint2D, TypedVector2D}; use euclid::ScaleFactor; -use script_traits::{DevicePixel, EventResult, TouchId}; +use script_traits::{EventResult, TouchId}; use self::TouchState::*; +use style_traits::DevicePixel; /// Minimum number of `DeviceIndependentPixel` to begin touch scrolling. const TOUCH_PAN_MIN_SCREEN_PX: f32 = 20.0; diff --git a/servo/components/compositing/windowing.rs b/servo/components/compositing/windowing.rs index 8604b41de4659..65ab7cc1bf00d 100644 --- a/servo/components/compositing/windowing.rs +++ b/servo/components/compositing/windowing.rs @@ -11,11 +11,12 @@ use gleam::gl; use ipc_channel::ipc::IpcSender; use msg::constellation_msg::{Key, KeyModifiers, KeyState, TraversalDirection}; use net_traits::net_error_list::NetError; -use script_traits::{DevicePixel, LoadData, MouseButton, TouchEventType, TouchId, TouchpadPressurePhase}; +use script_traits::{LoadData, MouseButton, TouchEventType, TouchId, TouchpadPressurePhase}; use servo_geometry::DeviceIndependentPixel; use servo_url::ServoUrl; use std::fmt::{Debug, Error, Formatter}; use std::rc::Rc; +use style_traits::DevicePixel; use style_traits::cursor::Cursor; use webrender_api::ScrollLocation; diff --git a/servo/components/constellation/pipeline.rs b/servo/components/constellation/pipeline.rs index d6571c208c967..8c3b2f3edbca4 100644 --- a/servo/components/constellation/pipeline.rs +++ b/servo/components/constellation/pipeline.rs @@ -21,7 +21,7 @@ use net_traits::{IpcSend, ResourceThreads}; use 
net_traits::image_cache::ImageCache; use profile_traits::mem as profile_mem; use profile_traits::time; -use script_traits::{ConstellationControlMsg, DevicePixel, DiscardBrowsingContext}; +use script_traits::{ConstellationControlMsg, DiscardBrowsingContext}; use script_traits::{DocumentActivity, InitialScriptState}; use script_traits::{LayoutControlMsg, LayoutMsg, LoadData, MozBrowserEvent}; use script_traits::{NewLayoutInfo, SWManagerMsg, SWManagerSenders, ScriptMsg}; @@ -38,6 +38,7 @@ use std::rc::Rc; use std::sync::Arc; use std::sync::mpsc::Sender; use style_traits::CSSPixel; +use style_traits::DevicePixel; use webrender_api; use webvr_traits::WebVRMsg; diff --git a/servo/components/layout/display_list_builder.rs b/servo/components/layout/display_list_builder.rs index d993170fb0873..c7850965758f9 100644 --- a/servo/components/layout/display_list_builder.rs +++ b/servo/components/layout/display_list_builder.rs @@ -1170,7 +1170,10 @@ impl FragmentDisplayListBuilding for Fragment { // including padding, but not border or margin, so we follow suit. // https://github.com/w3c/css-houdini-drafts/issues/417 let unbordered_box = self.border_box - style.logical_border_width(); - let size = unbordered_box.size.to_physical(style.writing_mode); + let device_pixel_ratio = state.layout_context.style_context.device_pixel_ratio(); + let size_in_au = unbordered_box.size.to_physical(style.writing_mode); + let size_in_px = TypedSize2D::new(size_in_au.width.to_f32_px(), size_in_au.height.to_f32_px()); + let size_in_dpx = size_in_px * device_pixel_ratio; let name = paint_worklet.name.clone(); // Get the painter, and the computed values for its properties. @@ -1188,17 +1191,17 @@ impl FragmentDisplayListBuilding for Fragment { // TODO: add a one-place cache to avoid drawing the paint image every time. 
// https://github.com/servo/servo/issues/17369 - debug!("Drawing a paint image {}({},{}).", name, size.width.to_px(), size.height.to_px()); + debug!("Drawing a paint image {}({},{}).", name, size_in_px.width, size_in_px.height); let (sender, receiver) = ipc::channel().unwrap(); - painter.draw_a_paint_image(size, properties, sender); + painter.draw_a_paint_image(size_in_px, device_pixel_ratio, properties, sender); // TODO: timeout let webrender_image = match receiver.recv() { Ok(CanvasData::Image(canvas_data)) => { WebRenderImageInfo { // TODO: it would be nice to get this data back from the canvas - width: size.width.to_px().abs() as u32, - height: size.height.to_px().abs() as u32, + width: size_in_dpx.width as u32, + height: size_in_dpx.height as u32, format: PixelFormat::BGRA8, key: Some(canvas_data.image_key), } diff --git a/servo/components/layout_thread/lib.rs b/servo/components/layout_thread/lib.rs index 5faf114740bb8..2f0fa57e5a300 100644 --- a/servo/components/layout_thread/lib.rs +++ b/servo/components/layout_thread/lib.rs @@ -462,9 +462,12 @@ impl LayoutThread { layout_threads: usize, paint_time_metrics: PaintTimeMetrics) -> LayoutThread { + // The device pixel ratio is incorrect (it does not have the hidpi value), + // but it will be set correctly when the initial reflow takes place. 
let device = Device::new( MediaType::Screen, - opts::get().initial_window_size.to_f32() * ScaleFactor::new(1.0)); + opts::get().initial_window_size.to_f32() * ScaleFactor::new(1.0), + ScaleFactor::new(opts::get().device_pixels_per_px.unwrap_or(1.0))); let configuration = rayon::Configuration::new().num_threads(layout_threads); @@ -1125,6 +1128,7 @@ impl LayoutThread { trace!("{:?}", ShowSubtree(element.as_node())); let initial_viewport = data.window_size.initial_viewport; + let device_pixel_ratio = data.window_size.device_pixel_ratio; let old_viewport_size = self.viewport_size; let current_screen_size = Size2D::new(Au::from_f32_px(initial_viewport.width), Au::from_f32_px(initial_viewport.height)); @@ -1134,7 +1138,7 @@ impl LayoutThread { let document_shared_lock = document.style_shared_lock(); self.document_shared_lock = Some(document_shared_lock.clone()); let author_guard = document_shared_lock.read(); - let device = Device::new(MediaType::Screen, initial_viewport); + let device = Device::new(MediaType::Screen, initial_viewport, device_pixel_ratio); self.stylist.set_device(device, &author_guard, &data.document_stylesheets); self.viewport_size = diff --git a/servo/components/script/dom/bindings/trace.rs b/servo/components/script/dom/bindings/trace.rs index 232ef20ec167d..6ddeb49acf73c 100644 --- a/servo/components/script/dom/bindings/trace.rs +++ b/servo/components/script/dom/bindings/trace.rs @@ -43,7 +43,7 @@ use dom::bindings::str::{DOMString, USVString}; use dom::bindings::utils::WindowProxyHandler; use dom::document::PendingRestyle; use encoding::types::EncodingRef; -use euclid::{Transform2D, Transform3D, Point2D, Vector2D, Rect, Size2D}; +use euclid::{Transform2D, Transform3D, Point2D, Vector2D, Rect, Size2D, ScaleFactor}; use euclid::Length as EuclidLength; use html5ever::{Prefix, LocalName, Namespace, QualName}; use html5ever::buffer_queue::BufferQueue; @@ -484,6 +484,13 @@ unsafe impl JSTraceable for Point2D { } } +unsafe impl JSTraceable for ScaleFactor 
{ + #[inline] + unsafe fn trace(&self, _trc: *mut JSTracer) { + // Do nothing + } +} + unsafe impl JSTraceable for Vector2D { #[inline] unsafe fn trace(&self, _trc: *mut JSTracer) { diff --git a/servo/components/script/dom/mediaquerylist.rs b/servo/components/script/dom/mediaquerylist.rs index b25e041f4ef4f..1765ef88bce57 100644 --- a/servo/components/script/dom/mediaquerylist.rs +++ b/servo/components/script/dom/mediaquerylist.rs @@ -76,7 +76,8 @@ impl MediaQueryList { pub fn evaluate(&self) -> bool { if let Some(window_size) = self.document.window().window_size() { let viewport_size = window_size.initial_viewport; - let device = Device::new(MediaType::Screen, viewport_size); + let device_pixel_ratio = window_size.device_pixel_ratio; + let device = Device::new(MediaType::Screen, viewport_size, device_pixel_ratio); self.media_query_list.evaluate(&device, self.document.quirks_mode()) } else { false diff --git a/servo/components/script/dom/paintrenderingcontext2d.rs b/servo/components/script/dom/paintrenderingcontext2d.rs index dd91ec0960e31..48ae1b186375e 100644 --- a/servo/components/script/dom/paintrenderingcontext2d.rs +++ b/servo/components/script/dom/paintrenderingcontext2d.rs @@ -2,7 +2,6 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ -use app_units::Au; use canvas_traits::CanvasData; use canvas_traits::CanvasMsg; use canvas_traits::FromLayoutMsg; @@ -26,12 +25,18 @@ use dom::canvaspattern::CanvasPattern; use dom::canvasrenderingcontext2d::CanvasRenderingContext2D; use dom::paintworkletglobalscope::PaintWorkletGlobalScope; use dom_struct::dom_struct; +use euclid::ScaleFactor; use euclid::Size2D; +use euclid::TypedSize2D; use ipc_channel::ipc::IpcSender; +use std::cell::Cell; +use style_traits::CSSPixel; +use style_traits::DevicePixel; #[dom_struct] pub struct PaintRenderingContext2D { context: CanvasRenderingContext2D, + device_pixel_ratio: Cell>, } impl PaintRenderingContext2D { @@ -39,6 +44,7 @@ impl PaintRenderingContext2D { let size = Size2D::zero(); PaintRenderingContext2D { context: CanvasRenderingContext2D::new_inherited(global.upcast(), None, size), + device_pixel_ratio: Cell::new(ScaleFactor::new(1.0)), } } @@ -53,9 +59,21 @@ impl PaintRenderingContext2D { let _ = self.context.ipc_renderer().send(msg); } - pub fn set_bitmap_dimensions(&self, size: Size2D) { - let size = Size2D::new(size.width.to_px(), size.height.to_px()); - self.context.set_bitmap_dimensions(size); + pub fn set_bitmap_dimensions(&self, + size: TypedSize2D, + device_pixel_ratio: ScaleFactor) + { + let size = size * device_pixel_ratio; + self.device_pixel_ratio.set(device_pixel_ratio); + self.context.set_bitmap_dimensions(size.to_untyped().to_i32()); + self.scale_by_device_pixel_ratio(); + } + + fn scale_by_device_pixel_ratio(&self) { + let device_pixel_ratio = self.device_pixel_ratio.get().get() as f64; + if device_pixel_ratio != 1.0 { + self.Scale(device_pixel_ratio, device_pixel_ratio); + } } } @@ -92,12 +110,14 @@ impl PaintRenderingContext2DMethods for PaintRenderingContext2D { // https://html.spec.whatwg.org/multipage/#dom-context-2d-settransform fn SetTransform(&self, a: f64, b: f64, c: f64, d: f64, e: f64, f: f64) { - self.context.SetTransform(a, b, c, d, e, f) + self.context.SetTransform(a, b, c, d, e, f); + 
self.scale_by_device_pixel_ratio(); } // https://html.spec.whatwg.org/multipage/#dom-context-2d-resettransform fn ResetTransform(&self) { - self.context.ResetTransform() + self.context.ResetTransform(); + self.scale_by_device_pixel_ratio(); } // https://html.spec.whatwg.org/multipage/#dom-context-2d-globalalpha diff --git a/servo/components/script/dom/paintsize.rs b/servo/components/script/dom/paintsize.rs index c012850f3446c..df94bd3656623 100644 --- a/servo/components/script/dom/paintsize.rs +++ b/servo/components/script/dom/paintsize.rs @@ -2,7 +2,6 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ -use app_units::Au; use dom::bindings::codegen::Bindings::PaintSizeBinding; use dom::bindings::codegen::Bindings::PaintSizeBinding::PaintSizeMethods; use dom::bindings::js::Root; @@ -11,7 +10,8 @@ use dom::bindings::reflector::Reflector; use dom::bindings::reflector::reflect_dom_object; use dom::paintworkletglobalscope::PaintWorkletGlobalScope; use dom_struct::dom_struct; -use euclid::Size2D; +use euclid::TypedSize2D; +use style_traits::CSSPixel; #[dom_struct] pub struct PaintSize { @@ -21,15 +21,15 @@ pub struct PaintSize { } impl PaintSize { - fn new_inherited(size: Size2D) -> PaintSize { + fn new_inherited(size: TypedSize2D) -> PaintSize { PaintSize { reflector: Reflector::new(), - width: Finite::wrap(size.width.to_px().abs() as f64), - height: Finite::wrap(size.height.to_px().abs() as f64), + width: Finite::wrap(size.width as f64), + height: Finite::wrap(size.height as f64), } } - pub fn new(global: &PaintWorkletGlobalScope, size: Size2D) -> Root { + pub fn new(global: &PaintWorkletGlobalScope, size: TypedSize2D) -> Root { reflect_dom_object(box PaintSize::new_inherited(size), global, PaintSizeBinding::Wrap) } } diff --git a/servo/components/script/dom/paintworkletglobalscope.rs b/servo/components/script/dom/paintworkletglobalscope.rs index 95f885cd66268..058c017693f28 100644 --- 
a/servo/components/script/dom/paintworkletglobalscope.rs +++ b/servo/components/script/dom/paintworkletglobalscope.rs @@ -2,7 +2,6 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ -use app_units::Au; use canvas_traits::CanvasData; use canvas_traits::CanvasImageData; use dom::bindings::callback::CallbackContainer; @@ -27,7 +26,8 @@ use dom::workletglobalscope::WorkletGlobalScope; use dom::workletglobalscope::WorkletGlobalScopeInit; use dom::workletglobalscope::WorkletTask; use dom_struct::dom_struct; -use euclid::Size2D; +use euclid::ScaleFactor; +use euclid::TypedSize2D; use ipc_channel::ipc::IpcSender; use ipc_channel::ipc::IpcSharedMemory; use js::jsapi::Call; @@ -59,6 +59,8 @@ use std::ptr::null_mut; use std::rc::Rc; use std::sync::Arc; use std::sync::Mutex; +use style_traits::CSSPixel; +use style_traits::DevicePixel; /// https://drafts.css-houdini.org/css-paint-api/#paintworkletglobalscope #[dom_struct] @@ -94,9 +96,9 @@ impl PaintWorkletGlobalScope { pub fn perform_a_worklet_task(&self, task: PaintWorkletTask) { match task { - PaintWorkletTask::DrawAPaintImage(name, size, properties, sender) => { + PaintWorkletTask::DrawAPaintImage(name, size, device_pixel_ratio, properties, sender) => { let properties = StylePropertyMapReadOnly::from_iter(self.upcast(), properties); - self.draw_a_paint_image(name, size, &*properties, sender); + self.draw_a_paint_image(name, size, device_pixel_ratio, &*properties, sender); } } } @@ -104,25 +106,28 @@ impl PaintWorkletGlobalScope { /// https://drafts.css-houdini.org/css-paint-api/#draw-a-paint-image fn draw_a_paint_image(&self, name: Atom, - size: Size2D, + size_in_px: TypedSize2D, + device_pixel_ratio: ScaleFactor, properties: &StylePropertyMapReadOnly, sender: IpcSender) { // TODO: document paint definitions. 
- self.invoke_a_paint_callback(name, size, properties, sender); + self.invoke_a_paint_callback(name, size_in_px, device_pixel_ratio, properties, sender); } /// https://drafts.css-houdini.org/css-paint-api/#invoke-a-paint-callback #[allow(unsafe_code)] fn invoke_a_paint_callback(&self, name: Atom, - size: Size2D, + size_in_px: TypedSize2D, + device_pixel_ratio: ScaleFactor, properties: &StylePropertyMapReadOnly, sender: IpcSender) { - let width = size.width.to_px().abs() as u32; - let height = size.height.to_px().abs() as u32; - debug!("Invoking a paint callback {}({},{}).", name, width, height); + let size_in_dpx = size_in_px * device_pixel_ratio; + let size_in_dpx = TypedSize2D::new(size_in_dpx.width.abs() as u32, size_in_dpx.height.abs() as u32); + debug!("Invoking a paint callback {}({},{}) at {}.", + name, size_in_px.width, size_in_px.height, device_pixel_ratio); let cx = self.worklet_global.get_cx(); let _ac = JSAutoCompartment::new(cx, self.worklet_global.reflector().get_jsobject().get()); @@ -135,13 +140,13 @@ impl PaintWorkletGlobalScope { None => { // Step 2.2. 
warn!("Drawing un-registered paint definition {}.", name); - return self.send_invalid_image(size, sender); + return self.send_invalid_image(size_in_dpx, sender); } Some(definition) => { // Step 5.1 if !definition.constructor_valid_flag.get() { debug!("Drawing invalid paint definition {}.", name); - return self.send_invalid_image(size, sender); + return self.send_invalid_image(size_in_dpx, sender); } class_constructor.set(definition.class_constructor.get()); paint_function.set(definition.paint_function.get()); @@ -169,7 +174,7 @@ impl PaintWorkletGlobalScope { self.paint_definitions.borrow_mut().get_mut(&name) .expect("Vanishing paint definition.") .constructor_valid_flag.set(false); - return self.send_invalid_image(size, sender); + return self.send_invalid_image(size_in_dpx, sender); } // Step 5.4 entry.insert(Box::new(Heap::default())).set(paint_instance.get()); @@ -180,10 +185,10 @@ impl PaintWorkletGlobalScope { // Step 8 // TODO: the spec requires creating a new paint rendering context each time, // this code recycles the same one. 
- rendering_context.set_bitmap_dimensions(size); + rendering_context.set_bitmap_dimensions(size_in_px, device_pixel_ratio); // Step 9 - let paint_size = PaintSize::new(self, size); + let paint_size = PaintSize::new(self, size_in_px); // TODO: Step 10 // Steps 11-12 @@ -202,17 +207,19 @@ impl PaintWorkletGlobalScope { if unsafe { JS_IsExceptionPending(cx) } { debug!("Paint function threw an exception {}.", name); unsafe { JS_ClearPendingException(cx); } - return self.send_invalid_image(size, sender); + return self.send_invalid_image(size_in_dpx, sender); } rendering_context.send_data(sender); } - // https://drafts.csswg.org/css-images-4/#invalid-image - fn send_invalid_image(&self, size: Size2D, sender: IpcSender) { + fn send_invalid_image(&self, + size: TypedSize2D, + sender: IpcSender) + { debug!("Sending an invalid image."); - let width = size.width.to_px().abs() as u32; - let height = size.height.to_px().abs() as u32; + let width = size.width as u32; + let height = size.height as u32; let len = (width as usize) * (height as usize) * 4; let pixel = [0x00, 0x00, 0x00, 0x00]; let bytes: Vec = pixel.iter().cloned().cycle().take(len).collect(); @@ -235,12 +242,13 @@ impl PaintWorkletGlobalScope { struct WorkletPainter(Atom, Mutex); impl Painter for WorkletPainter { fn draw_a_paint_image(&self, - size: Size2D, + size: TypedSize2D, + device_pixel_ratio: ScaleFactor, properties: Vec<(Atom, String)>, sender: IpcSender) { let name = self.0.clone(); - let task = PaintWorkletTask::DrawAPaintImage(name, size, properties, sender); + let task = PaintWorkletTask::DrawAPaintImage(name, size, device_pixel_ratio, properties, sender); self.1.lock().expect("Locking a painter.") .schedule_a_worklet_task(WorkletTask::Paint(task)); } @@ -334,7 +342,11 @@ impl PaintWorkletGlobalScopeMethods for PaintWorkletGlobalScope { /// Tasks which can be peformed by a paint worklet pub enum PaintWorkletTask { - DrawAPaintImage(Atom, Size2D, Vec<(Atom, String)>, IpcSender) + DrawAPaintImage(Atom, + 
TypedSize2D, + ScaleFactor, + Vec<(Atom, String)>, + IpcSender) } /// A paint definition diff --git a/servo/components/script_traits/Cargo.toml b/servo/components/script_traits/Cargo.toml index 8a8965ce3e3a6..70fcdbb3f33d4 100644 --- a/servo/components/script_traits/Cargo.toml +++ b/servo/components/script_traits/Cargo.toml @@ -10,7 +10,6 @@ name = "script_traits" path = "lib.rs" [dependencies] -app_units = "0.5" bluetooth_traits = {path = "../bluetooth_traits"} canvas_traits = {path = "../canvas_traits"} cookie = "0.6" diff --git a/servo/components/script_traits/lib.rs b/servo/components/script_traits/lib.rs index 40b8532ab54a5..72af4d23108ce 100644 --- a/servo/components/script_traits/lib.rs +++ b/servo/components/script_traits/lib.rs @@ -9,7 +9,6 @@ #![deny(missing_docs)] #![deny(unsafe_code)] -extern crate app_units; extern crate bluetooth_traits; extern crate canvas_traits; extern crate cookie as cookie_rs; @@ -39,7 +38,6 @@ extern crate webvr_traits; mod script_msg; pub mod webdriver_msg; -use app_units::Au; use bluetooth_traits::BluetoothRequest; use canvas_traits::CanvasData; use devtools_traits::{DevtoolScriptControlMsg, ScriptToDevtoolsControlMsg, WorkerId}; @@ -68,6 +66,7 @@ use std::fmt; use std::sync::Arc; use std::sync::mpsc::{Receiver, Sender, RecvTimeoutError}; use style_traits::CSSPixel; +use style_traits::DevicePixel; use webdriver_msg::{LoadStatus, WebDriverScriptCommand}; use webrender_api::ClipId; use webvr_traits::{WebVREvent, WebVRMsg}; @@ -687,12 +686,6 @@ pub struct ScrollState { pub scroll_offset: Vector2D, } -/// One hardware pixel. -/// -/// This unit corresponds to the smallest addressable element of the display hardware. -#[derive(Copy, Clone, Debug)] -pub enum DevicePixel {} - /// Data about the window size. 
#[derive(Copy, Clone, Deserialize, Serialize, HeapSizeOf)] pub struct WindowSizeData { @@ -828,7 +821,8 @@ impl From for PaintWorkletError { pub trait Painter: Sync + Send { /// https://drafts.css-houdini.org/css-paint-api/#draw-a-paint-image fn draw_a_paint_image(&self, - concrete_object_size: Size2D, + size: TypedSize2D, + zoom: ScaleFactor, properties: Vec<(Atom, String)>, sender: IpcSender); } diff --git a/servo/components/style/context.rs b/servo/components/style/context.rs index befcd9d9e0379..e9733cb0985d1 100644 --- a/servo/components/style/context.rs +++ b/servo/components/style/context.rs @@ -11,6 +11,7 @@ use bloom::StyleBloom; use cache::LRUCache; use data::{EagerPseudoStyles, ElementData}; use dom::{OpaqueNode, TNode, TElement, SendElement}; +use euclid::ScaleFactor; use euclid::Size2D; use fnv::FnvHashMap; use font_metrics::FontMetricsProvider; @@ -27,6 +28,8 @@ use std::fmt; use std::ops; #[cfg(feature = "servo")] use std::sync::Mutex; #[cfg(feature = "servo")] use std::sync::mpsc::Sender; +use style_traits::CSSPixel; +use style_traits::DevicePixel; use stylist::Stylist; use thread_state; use time; @@ -152,6 +155,11 @@ impl<'a> SharedStyleContext<'a> { pub fn viewport_size(&self) -> Size2D { self.stylist.device().au_viewport_size() } + + /// The device pixel ratio + pub fn device_pixel_ratio(&self) -> ScaleFactor { + self.stylist.device().device_pixel_ratio() + } } /// The structure holds various intermediate inputs that are eventually used by diff --git a/servo/components/style/gecko/media_queries.rs b/servo/components/style/gecko/media_queries.rs index 922a5df637fb3..8b0e00b91fe10 100644 --- a/servo/components/style/gecko/media_queries.rs +++ b/servo/components/style/gecko/media_queries.rs @@ -4,9 +4,11 @@ //! Gecko's media-query device and expression representation. 
+use app_units::AU_PER_PX; use app_units::Au; use context::QuirksMode; use cssparser::{CssStringWriter, Parser, RGBA, Token, BasicParseError}; +use euclid::ScaleFactor; use euclid::Size2D; use font_metrics::get_metrics_provider_for_product; use gecko::values::convert_nscolor_to_rgba; @@ -25,6 +27,7 @@ use std::fmt::{self, Write}; use std::sync::atomic::{AtomicBool, AtomicIsize, Ordering}; use str::starts_with_ignore_ascii_case; use string_cache::Atom; +use style_traits::{CSSPixel, DevicePixel}; use style_traits::{ToCss, ParseError, StyleParseError}; use style_traits::viewport::ViewportConstraints; use values::{CSSFloat, specified}; @@ -153,6 +156,15 @@ impl Device { }) } + /// Returns the device pixel ratio. + pub fn device_pixel_ratio(&self) -> ScaleFactor { + let override_dppx = self.pres_context().mOverrideDPPX; + if override_dppx > 0.0 { return ScaleFactor::new(override_dppx); } + let au_per_dpx = self.pres_context().mCurAppUnitsPerDevPixel as f32; + let au_per_px = AU_PER_PX as f32; + ScaleFactor::new(au_per_px / au_per_dpx) + } + /// Returns whether document colors are enabled. 
pub fn use_document_colors(&self) -> bool { self.pres_context().mUseDocumentColors() != 0 diff --git a/servo/components/style/servo/media_queries.rs b/servo/components/style/servo/media_queries.rs index 4c56860d2cd8c..7583a89f7da56 100644 --- a/servo/components/style/servo/media_queries.rs +++ b/servo/components/style/servo/media_queries.rs @@ -7,7 +7,7 @@ use app_units::Au; use context::QuirksMode; use cssparser::{Parser, RGBA}; -use euclid::{Size2D, TypedSize2D}; +use euclid::{ScaleFactor, Size2D, TypedSize2D}; use font_metrics::ServoMetricsProvider; use media_queries::MediaType; use parser::ParserContext; @@ -16,7 +16,7 @@ use properties::longhands::font_size; use selectors::parser::SelectorParseError; use std::fmt; use std::sync::atomic::{AtomicBool, AtomicIsize, Ordering}; -use style_traits::{CSSPixel, ToCss, ParseError}; +use style_traits::{CSSPixel, DevicePixel, ToCss, ParseError}; use style_traits::viewport::ViewportConstraints; use values::computed::{self, ToComputedValue}; use values::specified; @@ -31,6 +31,8 @@ pub struct Device { media_type: MediaType, /// The current viewport size, in CSS pixels. viewport_size: TypedSize2D, + /// The current device pixel ratio, from CSS pixels to device pixels. + device_pixel_ratio: ScaleFactor, /// The font size of the root element /// This is set when computing the style of the root @@ -51,11 +53,13 @@ pub struct Device { impl Device { /// Trivially construct a new `Device`. pub fn new(media_type: MediaType, - viewport_size: TypedSize2D) + viewport_size: TypedSize2D, + device_pixel_ratio: ScaleFactor) -> Device { Device { media_type: media_type, viewport_size: viewport_size, + device_pixel_ratio: device_pixel_ratio, root_font_size: AtomicIsize::new(font_size::get_initial_value().0 as isize), // FIXME(bz): Seems dubious? used_root_font_size: AtomicBool::new(false), } @@ -99,6 +103,11 @@ impl Device { self.viewport_size } + /// Returns the device pixel ratio. 
+ pub fn device_pixel_ratio(&self) -> ScaleFactor { + self.device_pixel_ratio + } + /// Take into account a viewport rule taken from the stylesheets. pub fn account_for_viewport_rule(&mut self, constraints: &ViewportConstraints) { self.viewport_size = constraints.size; diff --git a/servo/components/style_traits/lib.rs b/servo/components/style_traits/lib.rs index b0b299ec64f52..5047388ac37f7 100644 --- a/servo/components/style_traits/lib.rs +++ b/servo/components/style_traits/lib.rs @@ -59,6 +59,12 @@ impl PinchZoomFactor { #[derive(Clone, Copy, Debug)] pub enum CSSPixel {} +/// One hardware pixel. +/// +/// This unit corresponds to the smallest addressable element of the display hardware. +#[derive(Copy, Clone, Debug)] +pub enum DevicePixel {} + // In summary, the hierarchy of pixel units and the factors to convert from one to the next: // // DevicePixel diff --git a/servo/ports/cef/window.rs b/servo/ports/cef/window.rs index f38a7c11a7401..b598e35abddd2 100644 --- a/servo/ports/cef/window.rs +++ b/servo/ports/cef/window.rs @@ -23,7 +23,7 @@ use euclid::{Point2D, TypedPoint2D, TypedRect, Size2D, TypedSize2D, ScaleFactor} use gleam::gl; use msg::constellation_msg::{Key, KeyModifiers}; use net_traits::net_error_list::NetError; -use script_traits::{DevicePixel, LoadData}; +use script_traits::LoadData; use servo::ipc_channel::ipc::IpcSender; use servo_geometry::DeviceIndependentPixel; use std::cell::RefCell; @@ -34,6 +34,7 @@ use std::rc::Rc; use std::sync::mpsc::{Sender, channel}; use servo_url::ServoUrl; use style_traits::cursor::Cursor; +use style_traits::DevicePixel; #[cfg(target_os="linux")] extern crate x11; #[cfg(target_os="linux")] diff --git a/servo/ports/glutin/window.rs b/servo/ports/glutin/window.rs index b35b7d5f3c04e..b3ea8be90c0cf 100644 --- a/servo/ports/glutin/window.rs +++ b/servo/ports/glutin/window.rs @@ -24,7 +24,7 @@ use msg::constellation_msg::{ALT, CONTROL, KeyState, NONE, SHIFT, SUPER, Travers use net_traits::net_error_list::NetError; 
#[cfg(any(target_os = "linux", target_os = "macos"))] use osmesa_sys; -use script_traits::{DevicePixel, LoadData, TouchEventType, TouchpadPressurePhase}; +use script_traits::{LoadData, TouchEventType, TouchpadPressurePhase}; use servo::ipc_channel::ipc::IpcSender; use servo_config::opts; use servo_config::prefs::PREFS; @@ -39,6 +39,7 @@ use std::mem; use std::os::raw::c_void; use std::ptr; use std::rc::Rc; +use style_traits::DevicePixel; use style_traits::cursor::Cursor; #[cfg(target_os = "windows")] use user32; diff --git a/servo/tests/unit/style/media_queries.rs b/servo/tests/unit/style/media_queries.rs index dc24eed3594a4..0fa770f4ae68a 100644 --- a/servo/tests/unit/style/media_queries.rs +++ b/servo/tests/unit/style/media_queries.rs @@ -3,6 +3,7 @@ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use cssparser::{Parser, SourcePosition}; +use euclid::ScaleFactor; use euclid::TypedSize2D; use servo_arc::Arc; use servo_url::ServoUrl; @@ -39,7 +40,7 @@ fn test_media_rule(css: &str, callback: F) let stylesheet = Stylesheet::from_str( css, url, Origin::Author, media_list, lock, None, &CSSErrorReporterTest, QuirksMode::NoQuirks, 0u64); - let dummy = Device::new(MediaType::Screen, TypedSize2D::new(200.0, 100.0)); + let dummy = Device::new(MediaType::Screen, TypedSize2D::new(200.0, 100.0), ScaleFactor::new(1.0)); let mut rule_count = 0; let guard = stylesheet.shared_lock.read(); for rule in stylesheet.iter_rules::(&dummy, &guard) { @@ -342,7 +343,7 @@ fn test_mq_malformed_expressions() { #[test] fn test_matching_simple() { - let device = Device::new(MediaType::Screen, TypedSize2D::new(200.0, 100.0)); + let device = Device::new(MediaType::Screen, TypedSize2D::new(200.0, 100.0), ScaleFactor::new(1.0)); media_query_test(&device, "@media not all { a { color: red; } }", 0); media_query_test(&device, "@media not screen { a { color: red; } }", 0); @@ -358,7 +359,7 @@ fn test_matching_simple() { #[test] fn test_matching_width() { - let device = 
Device::new(MediaType::Screen, TypedSize2D::new(200.0, 100.0)); + let device = Device::new(MediaType::Screen, TypedSize2D::new(200.0, 100.0), ScaleFactor::new(1.0)); media_query_test(&device, "@media { a { color: red; } }", 1); @@ -399,7 +400,7 @@ fn test_matching_width() { #[test] fn test_matching_invalid() { - let device = Device::new(MediaType::Screen, TypedSize2D::new(200.0, 100.0)); + let device = Device::new(MediaType::Screen, TypedSize2D::new(200.0, 100.0), ScaleFactor::new(1.0)); media_query_test(&device, "@media fridge { a { color: red; } }", 0); media_query_test(&device, "@media screen and (height: 100px) { a { color: red; } }", 0); diff --git a/servo/tests/unit/style/parsing/mod.rs b/servo/tests/unit/style/parsing/mod.rs index bf3f1c12e085e..ed69ee9fe70d6 100644 --- a/servo/tests/unit/style/parsing/mod.rs +++ b/servo/tests/unit/style/parsing/mod.rs @@ -5,6 +5,7 @@ //! Tests for parsing and serialization of values/properties use cssparser::{Parser, ParserInput}; +use euclid::ScaleFactor; use euclid::TypedSize2D; use media_queries::CSSErrorReporterTest; use style::context::QuirksMode; @@ -50,7 +51,7 @@ fn assert_computed_serialization(f: F, input: &'static str, output: &st { let viewport_size = TypedSize2D::new(0., 0.); let initial_style = ComputedValues::initial_values(); - let device = Device::new(MediaType::Screen, viewport_size); + let device = Device::new(MediaType::Screen, viewport_size, ScaleFactor::new(1.0)); let context = Context { is_root_element: true, diff --git a/servo/tests/unit/style/stylist.rs b/servo/tests/unit/style/stylist.rs index b1cf2ef250864..f4d319d33a460 100644 --- a/servo/tests/unit/style/stylist.rs +++ b/servo/tests/unit/style/stylist.rs @@ -3,6 +3,7 @@ * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ use cssparser::SourceLocation; +use euclid::ScaleFactor; use euclid::TypedSize2D; use html5ever::LocalName; use selectors::parser::{AncestorHashes, Selector}; @@ -235,7 +236,7 @@ fn test_get_universal_rules() { } fn mock_stylist() -> Stylist { - let device = Device::new(MediaType::Screen, TypedSize2D::new(0f32, 0f32)); + let device = Device::new(MediaType::Screen, TypedSize2D::new(0f32, 0f32), ScaleFactor::new(1.0)); Stylist::new(device, QuirksMode::NoQuirks) } diff --git a/servo/tests/unit/style/viewport.rs b/servo/tests/unit/style/viewport.rs index 9f71e3129255c..3affe7434c486 100644 --- a/servo/tests/unit/style/viewport.rs +++ b/servo/tests/unit/style/viewport.rs @@ -3,6 +3,7 @@ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use cssparser::{Parser, ParserInput}; +use euclid::ScaleFactor; use euclid::TypedSize2D; use media_queries::CSSErrorReporterTest; use servo_arc::Arc; @@ -96,7 +97,7 @@ macro_rules! viewport_length { #[test] fn empty_viewport_rule() { - let device = Device::new(MediaType::Screen, TypedSize2D::new(800., 600.)); + let device = Device::new(MediaType::Screen, TypedSize2D::new(800., 600.), ScaleFactor::new(1.0)); test_viewport_rule("@viewport {}", &device, |declarations, css| { println!("{}", css); @@ -119,7 +120,7 @@ macro_rules! 
assert_decl_eq { #[test] fn simple_viewport_rules() { - let device = Device::new(MediaType::Screen, TypedSize2D::new(800., 600.)); + let device = Device::new(MediaType::Screen, TypedSize2D::new(800., 600.), ScaleFactor::new(1.0)); test_viewport_rule("@viewport { width: auto; height: auto;\ zoom: auto; min-zoom: 0; max-zoom: 200%;\ @@ -191,7 +192,7 @@ fn simple_meta_viewport_contents() { #[test] fn cascading_within_viewport_rule() { - let device = Device::new(MediaType::Screen, TypedSize2D::new(800., 600.)); + let device = Device::new(MediaType::Screen, TypedSize2D::new(800., 600.), ScaleFactor::new(1.0)); // normal order of appearance test_viewport_rule("@viewport { min-width: 200px; min-width: auto; }", @@ -257,7 +258,7 @@ fn cascading_within_viewport_rule() { #[test] fn multiple_stylesheets_cascading() { PREFS.set("layout.viewport.enabled", PrefValue::Boolean(true)); - let device = Device::new(MediaType::Screen, TypedSize2D::new(800., 600.)); + let device = Device::new(MediaType::Screen, TypedSize2D::new(800., 600.), ScaleFactor::new(1.0)); let error_reporter = CSSErrorReporterTest; let shared_lock = SharedRwLock::new(); let stylesheets = vec![ @@ -313,7 +314,7 @@ fn constrain_viewport() { } let initial_viewport = TypedSize2D::new(800., 600.); - let device = Device::new(MediaType::Screen, initial_viewport); + let device = Device::new(MediaType::Screen, initial_viewport, ScaleFactor::new(1.0)); let mut input = ParserInput::new(""); assert_eq!(ViewportConstraints::maybe_new(&device, from_css!(input), QuirksMode::NoQuirks), None); @@ -362,7 +363,7 @@ fn constrain_viewport() { })); let initial_viewport = TypedSize2D::new(200., 150.); - let device = Device::new(MediaType::Screen, initial_viewport); + let device = Device::new(MediaType::Screen, initial_viewport, ScaleFactor::new(1.0)); let mut input = ParserInput::new("width: 320px auto"); assert_eq!(ViewportConstraints::maybe_new(&device, from_css!(input), QuirksMode::NoQuirks), Some(ViewportConstraints { From 
2fa451fa7de55d641b397752a18fd5060b0b131f Mon Sep 17 00:00:00 2001 From: JW Wang Date: Wed, 19 Jul 2017 11:24:51 +0800 Subject: [PATCH 063/152] Bug 1316211. P1 - make some functions pure virtual for they will be overridden by MFR. r=gerald MozReview-Commit-ID: 9l8MbDRjLR0 --HG-- extra : rebase_source : f055ef64dfc71e0da709aab69db0049cafe10dae extra : source : 2e5c4ae8b368b605f397455b3fede770d546fe86 --- dom/media/MediaDecoderReader.cpp | 104 ------------------------------- dom/media/MediaDecoderReader.h | 40 +++++------- 2 files changed, 16 insertions(+), 128 deletions(-) diff --git a/dom/media/MediaDecoderReader.cpp b/dom/media/MediaDecoderReader.cpp index 452f57ccdf0f5..4ef8e588f4bde 100644 --- a/dom/media/MediaDecoderReader.cpp +++ b/dom/media/MediaDecoderReader.cpp @@ -173,14 +173,6 @@ MediaDecoderReader::DecodeToFirstVideoData() return p.forget(); } -void -MediaDecoderReader::UpdateBuffered() -{ - MOZ_ASSERT(OnTaskQueue()); - NS_ENSURE_TRUE_VOID(!mShutdown); - mBuffered = GetBuffered(); -} - void MediaDecoderReader::VisibilityChanged() {} @@ -198,35 +190,6 @@ MediaDecoderReader::GetBuffered() return GetEstimatedBufferedTimeRanges(stream, mDuration->ToMicroseconds()); } -RefPtr -MediaDecoderReader::AsyncReadMetadata() -{ - MOZ_ASSERT(OnTaskQueue()); - DECODER_LOG("MediaDecoderReader::AsyncReadMetadata"); - - // Attempt to read the metadata. - MetadataHolder metadata; - metadata.mInfo = MakeUnique(); - MetadataTags* tags = nullptr; - nsresult rv = ReadMetadata(metadata.mInfo.get(), &tags); - metadata.mTags.reset(tags); - metadata.mInfo->AssertValid(); - - // Update the buffer ranges before resolving the metadata promise. Bug 1320258. - UpdateBuffered(); - - // We're not waiting for anything. If we didn't get the metadata, that's an - // error. 
- if (NS_FAILED(rv) || !metadata.mInfo->HasValidMedia()) { - DECODER_WARN("ReadMetadata failed, rv=%" PRIx32 " HasValidMedia=%d", - static_cast(rv), metadata.mInfo->HasValidMedia()); - return MetadataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__); - } - - // Success! - return MetadataPromise::CreateAndResolve(Move(metadata), __func__); -} - class ReRequestVideoWithSkipTask : public Runnable { public: @@ -280,73 +243,6 @@ class ReRequestAudioTask : public Runnable RefPtr mReader; }; -RefPtr -MediaDecoderReader::RequestVideoData(const media::TimeUnit& aTimeThreshold) -{ - RefPtr p = mBaseVideoPromise.Ensure(__func__); - bool skip = false; - while (VideoQueue().GetSize() == 0 && - !VideoQueue().IsFinished()) { - if (!DecodeVideoFrame(skip, aTimeThreshold)) { - VideoQueue().Finish(); - } else if (skip) { - // We still need to decode more data in order to skip to the next - // keyframe. Post another task to the decode task queue to decode - // again. We don't just decode straight in a loop here, as that - // would hog the decode task queue. - RefPtr task( - new ReRequestVideoWithSkipTask(this, aTimeThreshold)); - mTaskQueue->Dispatch(task.forget()); - return p; - } - } - if (VideoQueue().GetSize() > 0) { - RefPtr v = VideoQueue().PopFront(); - mBaseVideoPromise.Resolve(v, __func__); - } else if (VideoQueue().IsFinished()) { - mBaseVideoPromise.Reject(NS_ERROR_DOM_MEDIA_END_OF_STREAM, __func__); - } else { - MOZ_ASSERT(false, "Dropping this promise on the floor"); - } - - return p; -} - -RefPtr -MediaDecoderReader::RequestAudioData() -{ - RefPtr p = mBaseAudioPromise.Ensure(__func__); - while (AudioQueue().GetSize() == 0 && - !AudioQueue().IsFinished()) { - if (!DecodeAudioData()) { - AudioQueue().Finish(); - break; - } - // AudioQueue size is still zero, post a task to try again. 
Don't spin - // waiting in this while loop since it somehow prevents audio EOS from - // coming in gstreamer 1.x when there is still video buffer waiting to be - // consumed. (|mVideoSinkBufferCount| > 0) - if (AudioQueue().GetSize() == 0) { - RefPtr task(new ReRequestAudioTask(this)); - mTaskQueue->Dispatch(task.forget()); - return p; - } - } - if (AudioQueue().GetSize() > 0) { - RefPtr a = AudioQueue().PopFront(); - mBaseAudioPromise.Resolve(a, __func__); - } else if (AudioQueue().IsFinished()) { - mBaseAudioPromise.Reject(mHitAudioDecodeError - ? NS_ERROR_DOM_MEDIA_FATAL_ERR - : NS_ERROR_DOM_MEDIA_END_OF_STREAM, __func__); - mHitAudioDecodeError = false; - } else { - MOZ_ASSERT(false, "Dropping this promise on the floor"); - } - - return p; -} - RefPtr MediaDecoderReader::Shutdown() { diff --git a/dom/media/MediaDecoderReader.h b/dom/media/MediaDecoderReader.h index a443cc55eec23..9ba5af65e05b3 100644 --- a/dom/media/MediaDecoderReader.h +++ b/dom/media/MediaDecoderReader.h @@ -122,7 +122,7 @@ class MediaDecoderReader // Called by MDSM in dormant state to release resources allocated by this // reader. The reader can resume decoding by calling Seek() to a specific // position. - virtual void ReleaseResources() { } + virtual void ReleaseResources() = 0; // Destroys the decoding state. The reader cannot be made usable again. // This is different from ReleaseMediaResources() as it is irreversable, @@ -137,7 +137,7 @@ class MediaDecoderReader void UpdateDuration(const media::TimeUnit& aDuration); - virtual void UpdateCompositor(already_AddRefed) {} + virtual void UpdateCompositor(already_AddRefed) = 0; // Resets all state related to decoding, emptying all buffers etc. // Cancels all pending Request*Data() request callbacks, rejects any @@ -158,35 +158,32 @@ class MediaDecoderReader // // The decode should be performed asynchronously, and the promise should // be resolved when it is complete. 
- virtual RefPtr RequestAudioData(); + virtual RefPtr RequestAudioData() = 0; // Requests one video sample from the reader. virtual RefPtr - RequestVideoData(const media::TimeUnit& aTimeThreshold); + RequestVideoData(const media::TimeUnit& aTimeThreshold) = 0; // By default, the state machine polls the reader once per second when it's // in buffering mode. Some readers support a promise-based mechanism by which // they notify the state machine when the data arrives. - virtual bool IsWaitForDataSupported() const { return false; } + virtual bool IsWaitForDataSupported() const = 0; - virtual RefPtr WaitForData(MediaData::Type aType) - { - MOZ_CRASH(); - } + virtual RefPtr WaitForData(MediaData::Type aType) = 0; // The default implementation of AsyncReadMetadata is implemented in terms of // synchronous ReadMetadata() calls. Implementations may also // override AsyncReadMetadata to create a more proper async implementation. - virtual RefPtr AsyncReadMetadata(); + virtual RefPtr AsyncReadMetadata() = 0; // Fills aInfo with the latest cached data required to present the media, // ReadUpdatedMetadata will always be called once ReadMetadata has succeeded. - virtual void ReadUpdatedMetadata(MediaInfo* aInfo) {} + virtual void ReadUpdatedMetadata(MediaInfo* aInfo) = 0; // Moves the decode head to aTime microseconds. virtual RefPtr Seek(const SeekTarget& aTarget) = 0; - virtual void SetCDMProxy(CDMProxy* aProxy) {} + virtual void SetCDMProxy(CDMProxy* aProxy) = 0; // Tell the reader that the data decoded are not for direct playback, so it // can accept more files, in particular those which have more channels than @@ -200,7 +197,7 @@ class MediaDecoderReader // raw media data is arriving sequentially from a network channel. This // makes sense in the